author		Jeff Garzik <jeff@garzik.org>	2006-09-24 01:52:47 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-24 01:52:47 -0400
commit		23930fa1cebfea6f79881c588ccd1b0781e49e3f (patch)
tree		36d29e3f83661c4f5f45b6f74ac0d5f9886867a8 /drivers
parent		36b35a5be0e4b406acd816e2122d153e875105be (diff)
parent		4f5537de7c1531398e84e18a24f667e49cc94208 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'drivers')
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/base/hypervisor.c3
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/cciss_scsi.c14
-rw-r--r--drivers/block/cryptoloop.c160
-rw-r--r--drivers/char/Kconfig22
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/efficeon-agp.c16
-rw-r--r--drivers/char/agp/frontend.c27
-rw-r--r--drivers/char/agp/generic.c39
-rw-r--r--drivers/char/agp/intel-agp.c173
-rw-r--r--drivers/char/agp/uninorth-agp.c4
-rw-r--r--drivers/char/agp/via-agp.c4
-rw-r--r--drivers/char/briq_panel.c271
-rw-r--r--drivers/char/hvc_console.c18
-rw-r--r--drivers/char/hvc_console.h2
-rw-r--r--drivers/char/hvc_iseries.c594
-rw-r--r--drivers/char/hvc_rtas.c2
-rw-r--r--drivers/char/hvc_vio.c7
-rw-r--r--drivers/char/hvsi.c7
-rw-r--r--drivers/char/istallion.c2
-rw-r--r--drivers/char/tpm/tpm_atmel.h4
-rw-r--r--drivers/char/viocons.c31
-rw-r--r--drivers/char/viotape.c6
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c173
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/crypto/Kconfig45
-rw-r--r--drivers/crypto/Makefile8
-rw-r--r--drivers/crypto/padlock-aes.c258
-rw-r--r--drivers/crypto/padlock-generic.c63
-rw-r--r--drivers/crypto/padlock-sha.c318
-rw-r--r--drivers/crypto/padlock.c58
-rw-r--r--drivers/crypto/padlock.h17
-rw-r--r--drivers/i2c/busses/i2c-powermac.c3
-rw-r--r--drivers/ide/ppc/pmac.c8
-rw-r--r--drivers/infiniband/Kconfig4
-rw-r--r--drivers/infiniband/Makefile4
-rw-r--r--drivers/infiniband/core/Makefile4
-rw-r--r--drivers/infiniband/core/addr.c22
-rw-r--r--drivers/infiniband/core/cache.c5
-rw-r--r--drivers/infiniband/core/cm.c66
-rw-r--r--drivers/infiniband/core/cma.c403
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/iwcm.c1019
-rw-r--r--drivers/infiniband/core/iwcm.h62
-rw-r--r--drivers/infiniband/core/mad.c19
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/mad_rmpp.c94
-rw-r--r--drivers/infiniband/core/sa_query.c67
-rw-r--r--drivers/infiniband/core/smi.c16
-rw-r--r--drivers/infiniband/core/sysfs.c13
-rw-r--r--drivers/infiniband/core/ucm.c9
-rw-r--r--drivers/infiniband/core/user_mad.c7
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c64
-rw-r--r--drivers/infiniband/core/verbs.c21
-rw-r--r--drivers/infiniband/hw/amso1100/Kbuild8
-rw-r--r--drivers/infiniband/hw/amso1100/Kconfig15
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c1255
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h551
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c321
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.h108
-rw-r--r--drivers/infiniband/hw/amso1100/c2_alloc.c144
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cm.c452
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c433
-rw-r--r--drivers/infiniband/hw/amso1100/c2_intr.c209
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mm.c375
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.c174
-rw-r--r--drivers/infiniband/hw/amso1100/c2_mq.h106
-rw-r--r--drivers/infiniband/hw/amso1100/c2_pd.c89
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c870
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.h181
-rw-r--r--drivers/infiniband/hw/amso1100/c2_qp.c975
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c664
-rw-r--r--drivers/infiniband/hw/amso1100/c2_status.h158
-rw-r--r--drivers/infiniband/hw/amso1100/c2_user.h82
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.c260
-rw-r--r--drivers/infiniband/hw/amso1100/c2_vq.h63
-rw-r--r--drivers/infiniband/hw/amso1100/c2_wr.h1520
-rw-r--r--drivers/infiniband/hw/ehca/Kconfig16
-rw-r--r--drivers/infiniband/hw/ehca/Makefile16
-rw-r--r--drivers/infiniband/hw/ehca/ehca_av.c271
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h346
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes_pSeries.h236
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c427
-rw-r--r--drivers/infiniband/hw/ehca/ehca_eq.c185
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c241
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c762
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.h77
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h182
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c818
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mcast.c131
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2261
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.h140
-rw-r--r--drivers/infiniband/hw/ehca/ehca_pd.c114
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qes.h259
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c1507
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c653
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c111
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h172
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c392
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c874
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h261
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.c80
-rw-r--r--drivers/infiniband/hw/ehca/hcp_phyp.h90
-rw-r--r--drivers/infiniband/hw/ehca/hipz_fns.h68
-rw-r--r--drivers/infiniband/hw/ehca/hipz_fns_core.h100
-rw-r--r--drivers/infiniband/hw/ehca/hipz_hw.h388
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.c149
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.h247
-rw-r--r--drivers/infiniband/hw/ipath/Kconfig21
-rw-r--r--drivers/infiniband/hw/ipath/Makefile29
-rw-r--r--drivers/infiniband/hw/ipath/ipath_common.h19
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c183
-rw-r--r--drivers/infiniband/hw/ipath/ipath_debug.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_diag.c155
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c349
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c35
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6110.c (renamed from drivers/infiniband/hw/ipath/ipath_ht400.c)53
-rw-r--r--drivers/infiniband/hw/ipath/ipath_iba6120.c (renamed from drivers/infiniband/hw/ipath/ipath_pe800.c)82
-rw-r--r--drivers/infiniband/hw/ipath/ipath_init_chip.c21
-rw-r--r--drivers/infiniband/hw/ipath/ipath_intr.c24
-rw-r--r--drivers/infiniband/hw/ipath/ipath_kernel.h57
-rw-r--r--drivers/infiniband/hw/ipath/ipath_keys.c3
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.c1179
-rw-r--r--drivers/infiniband/hw/ipath/ipath_layer.h115
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c339
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mmap.c122
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c12
-rw-r--r--drivers/infiniband/hw/ipath/ipath_qp.c242
-rw-r--r--drivers/infiniband/hw/ipath/ipath_rc.c9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_registers.h7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ruc.c160
-rw-r--r--drivers/infiniband/hw/ipath/ipath_srq.c244
-rw-r--r--drivers/infiniband/hw/ipath/ipath_stats.c27
-rw-r--r--drivers/infiniband/hw/ipath/ipath_sysfs.c41
-rw-r--r--drivers/infiniband/hw/ipath/ipath_uc.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_ud.c182
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c687
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h252
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs_mcast.c7
-rw-r--r--drivers/infiniband/hw/ipath/ipath_wc_ppc64.c52
-rw-r--r--drivers/infiniband/hw/ipath/verbs_debug.h108
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c62
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c88
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c20
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_uar.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c194
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c37
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c34
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c19
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h8
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c80
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c43
-rw-r--r--drivers/macintosh/adbhid.c2
-rw-r--r--drivers/macintosh/macio_asic.c10
-rw-r--r--drivers/macintosh/macio_sysfs.c8
-rw-r--r--drivers/macintosh/smu.c19
-rw-r--r--drivers/macintosh/therm_adt746x.c8
-rw-r--r--drivers/macintosh/therm_pm72.c14
-rw-r--r--drivers/macintosh/therm_windtunnel.c4
-rw-r--r--drivers/macintosh/via-cuda.c4
-rw-r--r--drivers/macintosh/via-pmu-led.c2
-rw-r--r--drivers/macintosh/via-pmu.c10
-rw-r--r--drivers/macintosh/windfarm_pm81.c4
-rw-r--r--drivers/macintosh/windfarm_pm91.c2
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c13
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c8
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c12
-rw-r--r--drivers/md/dm-crypt.c146
-rw-r--r--drivers/message/fusion/mptfc.c100
-rw-r--r--drivers/message/fusion/mptsas.c19
-rw-r--r--drivers/mtd/Kconfig10
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c87
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c112
-rw-r--r--drivers/mtd/chips/jedec_probe.c14
-rw-r--r--drivers/mtd/devices/block2mtd.c93
-rw-r--r--drivers/mtd/devices/m25p80.c12
-rw-r--r--drivers/mtd/devices/pmc551.c1158
-rw-r--r--drivers/mtd/maps/Kconfig20
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/amd76xrom.c5
-rw-r--r--drivers/mtd/maps/arctic-mtd.c14
-rw-r--r--drivers/mtd/maps/beech-mtd.c14
-rw-r--r--drivers/mtd/maps/cstm_mips_ixx.c18
-rw-r--r--drivers/mtd/maps/ebony.c4
-rw-r--r--drivers/mtd/maps/fortunet.c3
-rw-r--r--drivers/mtd/maps/ichxrom.c3
-rw-r--r--drivers/mtd/maps/iq80310.c118
-rw-r--r--drivers/mtd/maps/ixp4xx.c2
-rw-r--r--drivers/mtd/maps/l440gx.c12
-rw-r--r--drivers/mtd/maps/lasat.c2
-rw-r--r--drivers/mtd/maps/nettel.c34
-rw-r--r--drivers/mtd/maps/ocotea.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c4
-rw-r--r--drivers/mtd/maps/physmap.c33
-rw-r--r--drivers/mtd/maps/redwood.c11
-rw-r--r--drivers/mtd/maps/sbc8240.c11
-rw-r--r--drivers/mtd/maps/scx200_docflash.c9
-rw-r--r--drivers/mtd/maps/walnut.c4
-rw-r--r--drivers/mtd/mtdchar.c9
-rw-r--r--drivers/mtd/mtdcore.c10
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/au1550nd.c11
-rw-r--r--drivers/mtd/nand/edb7312.c3
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/nand/ndfc.c2
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c7
-rw-r--r--drivers/mtd/nand/sharpsl.c7
-rw-r--r--drivers/mtd/ssfdc.c474
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c6
-rw-r--r--drivers/net/acenic.c8
-rw-r--r--drivers/net/arcnet/com20020-pci.c1
-rw-r--r--drivers/net/bmac.c13
-rw-r--r--drivers/net/bnx2.c2
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/sge.c10
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/e1000/e1000_main.c8
-rw-r--r--drivers/net/forcedeth.c3
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2
-rw-r--r--drivers/net/ibmveth.c3
-rw-r--r--drivers/net/ibmveth.h27
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/irda/ali-ircc.c8
-rw-r--r--drivers/net/irda/irport.c4
-rw-r--r--drivers/net/irda/via-ircc.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c4
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/lp486e.c6
-rw-r--r--drivers/net/mace.c2
-rw-r--r--drivers/net/mv643xx_eth.c4
-rw-r--r--drivers/net/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/ppp_mppe.c68
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sk98lin/skge.c6
-rw-r--r--drivers/net/skge.c4
-rw-r--r--drivers/net/sky2.c6
-rw-r--r--drivers/net/spider_net.c12
-rw-r--r--drivers/net/starfire.c6
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/sunhme.c6
-rw-r--r--drivers/net/tg3.c199
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/via-rhine.c2
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wireless/airo.c22
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c32
-rw-r--r--drivers/s390/Kconfig30
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_devmap.c82
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/monwriter.c292
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/char/vmcp.h2
-rw-r--r--drivers/s390/cio/chsc.c5
-rw-r--r--drivers/s390/cio/cio.c95
-rw-r--r--drivers/s390/cio/css.c203
-rw-r--r--drivers/s390/cio/device.c109
-rw-r--r--drivers/s390/cio/device_fsm.c40
-rw-r--r--drivers/s390/cio/device_ops.c17
-rw-r--r--drivers/s390/cio/device_pgid.c81
-rw-r--r--drivers/s390/cio/qdio.c4
-rw-r--r--drivers/s390/cio/qdio.h16
-rw-r--r--drivers/s390/crypto/Makefile15
-rw-r--r--drivers/s390/crypto/ap_bus.c1221
-rw-r--r--drivers/s390/crypto/ap_bus.h158
-rw-r--r--drivers/s390/crypto/z90common.h166
-rw-r--r--drivers/s390/crypto/z90crypt.h71
-rw-r--r--drivers/s390/crypto/z90hardware.c2531
-rw-r--r--drivers/s390/crypto/z90main.c3379
-rw-r--r--drivers/s390/crypto/zcrypt_api.c1091
-rw-r--r--drivers/s390/crypto/zcrypt_api.h141
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h350
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c435
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h126
-rw-r--r--drivers/s390/crypto/zcrypt_error.h133
-rw-r--r--drivers/s390/crypto/zcrypt_mono.c100
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c418
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.h117
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c630
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.h176
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c951
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h79
-rw-r--r--drivers/s390/s390mach.c17
-rw-r--r--drivers/s390/scsi/zfcp_aux.c84
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c13
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c13
-rw-r--r--drivers/s390/scsi/zfcp_def.h32
-rw-r--r--drivers/s390/scsi/zfcp_erp.c231
-rw-r--r--drivers/s390/scsi/zfcp_ext.h18
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c299
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c112
-rw-r--r--drivers/s390/sysinfo.c455
-rw-r--r--drivers/scsi/BusLogic.c61
-rw-r--r--drivers/scsi/Kconfig32
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/a2091.c6
-rw-r--r--drivers/scsi/a2091.h4
-rw-r--r--drivers/scsi/a3000.c8
-rw-r--r--drivers/scsi/a3000.h4
-rw-r--r--drivers/scsi/aacraid/aachba.c60
-rw-r--r--drivers/scsi/aacraid/aacraid.h20
-rw-r--r--drivers/scsi/aacraid/commctrl.c25
-rw-r--r--drivers/scsi/aacraid/comminit.c13
-rw-r--r--drivers/scsi/aacraid/commsup.c279
-rw-r--r--drivers/scsi/aacraid/dpcsup.c10
-rw-r--r--drivers/scsi/aacraid/linit.c35
-rw-r--r--drivers/scsi/aacraid/rkt.c446
-rw-r--r--drivers/scsi/aacraid/rx.c117
-rw-r--r--drivers/scsi/aacraid/sa.c21
-rw-r--r--drivers/scsi/advansys.c90
-rw-r--r--drivers/scsi/aha152x.c53
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c23
-rw-r--r--drivers/scsi/aic7xxx_old.c11
-rw-r--r--drivers/scsi/aic94xx/Kconfig41
-rw-r--r--drivers/scsi/aic94xx/Makefile39
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h114
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dev.c353
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.c959
-rw-r--r--drivers/scsi/aic94xx/aic94xx_dump.h52
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c1376
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.h397
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c866
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg.c332
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg.h302
-rw-r--r--drivers/scsi/aic94xx/aic94xx_reg_def.h2398
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sas.h785
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c758
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c1089
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.c1404
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.h70
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c642
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c636
-rw-r--r--drivers/scsi/arcmsr/Makefile6
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h472
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c381
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1496
-rw-r--r--drivers/scsi/dpt_i2o.c7
-rw-r--r--drivers/scsi/eata_generic.h1
-rw-r--r--drivers/scsi/eata_pio.c127
-rw-r--r--drivers/scsi/fcal.c3
-rw-r--r--drivers/scsi/g_NCR5380.c3
-rw-r--r--drivers/scsi/gvp11.c8
-rw-r--r--drivers/scsi/gvp11.h4
-rw-r--r--drivers/scsi/hosts.c7
-rw-r--r--drivers/scsi/hptiop.c1
-rw-r--r--drivers/scsi/ibmvscsi/rpa_vscsi.c11
-rw-r--r--drivers/scsi/ipr.c34
-rw-r--r--drivers/scsi/ipr.h82
-rw-r--r--drivers/scsi/iscsi_tcp.c801
-rw-r--r--drivers/scsi/iscsi_tcp.h44
-rw-r--r--drivers/scsi/libiscsi.c144
-rw-r--r--drivers/scsi/libsas/Kconfig39
-rw-r--r--drivers/scsi/libsas/Makefile36
-rw-r--r--drivers/scsi/libsas/sas_discover.c749
-rw-r--r--drivers/scsi/libsas/sas_dump.c76
-rw-r--r--drivers/scsi/libsas/sas_dump.h42
-rw-r--r--drivers/scsi/libsas/sas_event.c75
-rw-r--r--drivers/scsi/libsas/sas_expander.c1855
-rw-r--r--drivers/scsi/libsas/sas_init.c267
-rw-r--r--drivers/scsi/libsas/sas_internal.h146
-rw-r--r--drivers/scsi/libsas/sas_phy.c158
-rw-r--r--drivers/scsi/libsas/sas_port.c279
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c786
-rw-r--r--drivers/scsi/lpfc/lpfc.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c285
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c186
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/megaraid.c9
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c36
-rw-r--r--drivers/scsi/mesh.c5
-rw-r--r--drivers/scsi/mvme147.c6
-rw-r--r--drivers/scsi/mvme147.h4
-rw-r--r--drivers/scsi/scsi.c58
-rw-r--r--drivers/scsi/scsi.h2
-rw-r--r--drivers/scsi/scsi_debug.c230
-rw-r--r--drivers/scsi/scsi_lib.c10
-rw-r--r--drivers/scsi/scsi_netlink.c199
-rw-r--r--drivers/scsi/scsi_priv.h11
-rw-r--r--drivers/scsi/scsi_proc.c4
-rw-r--r--drivers/scsi/scsi_scan.c146
-rw-r--r--drivers/scsi/scsi_transport_fc.c370
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c83
-rw-r--r--drivers/scsi/scsi_transport_spi.c30
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sgiwd93.c8
-rw-r--r--drivers/scsi/stex.c1252
-rw-r--r--drivers/scsi/ultrastor.c23
-rw-r--r--drivers/scsi/ultrastor.h12
-rw-r--r--drivers/serial/pmac_zilog.c9
-rw-r--r--drivers/usb/input/hid-core.c2
-rw-r--r--drivers/video/S3triofb.c12
-rw-r--r--drivers/video/aty/radeon_base.c8
-rw-r--r--drivers/video/aty/radeon_monitor.c12
-rw-r--r--drivers/video/aty/radeon_pm.c4
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/nvidia/nv_of.c12
-rw-r--r--drivers/video/offb.c22
-rw-r--r--drivers/video/riva/fbdev.c9
435 files changed, 60151 insertions, 13211 deletions
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b1b510493c2d..3fa80f09f2ae 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -32,7 +32,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 2a7e3495cf16..d6d6658d8328 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -268,7 +268,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
 	/* Match it to a port node */
 	index = (ap == ap->host->ports[0]) ? 0 : 1;
 	for (np = np->child; np != NULL; np = np->sibling) {
-		u32 *reg = (u32 *)get_property(np, "reg", NULL);
+		const u32 *reg = get_property(np, "reg", NULL);
 		if (!reg)
 			continue;
 		if (index == *reg)
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index dd96123a2b7f..41e052fecd7f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1912,7 +1912,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 		skb->tail = skb->data + skb->len;
 #ifdef USE_CHECKSUM_HW
 		if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
-			skb->ip_summed = CHECKSUM_HW;
+			skb->ip_summed = CHECKSUM_COMPLETE;
 			skb->csum = TCP_CKSUM(skb->data,
 					he_vcc->pdu_len);
 		}
@@ -1928,7 +1928,9 @@ he_service_rbrq(struct he_dev *he_dev, int group)
 #ifdef notdef
 		ATM_SKB(skb)->vcc = vcc;
 #endif
+		spin_unlock(&he_dev->global_lock);
 		vcc->push(vcc, skb);
+		spin_lock(&he_dev->global_lock);
 
 		atomic_inc(&vcc->stats->rx);
 
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
index 0c85e9d6a448..7080b413ddc9 100644
--- a/drivers/base/hypervisor.c
+++ b/drivers/base/hypervisor.c
@@ -1,8 +1,9 @@
 /*
  * hypervisor.c - /sys/hypervisor subsystem.
  *
- * This file is released under the GPLv2
+ * Copyright (C) IBM Corp. 2006
  *
+ * This file is released under the GPLv2
  */
 
 #include <linux/kobject.h>
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 4cd23c3eab41..a360215dbce7 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7115,7 +7115,7 @@ static struct pci_device_id DAC960_id_table[] = {
 	{
 		.vendor		= PCI_VENDOR_ID_MYLEX,
 		.device		= PCI_DEVICE_ID_MYLEX_DAC960_GEM,
-		.subvendor	= PCI_ANY_ID,
+		.subvendor	= PCI_VENDOR_ID_MYLEX,
 		.subdevice	= PCI_ANY_ID,
 		.driver_data	= (unsigned long) &DAC960_GEM_privdata,
 	},
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index afdff32f6724..05f79d7393f7 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -251,10 +251,6 @@ scsi_cmd_stack_free(int ctlr)
 	stk->pool = NULL;
 }
 
-/* scsi_device_types comes from scsi.h */
-#define DEVICETYPE(n) (n<0 || n>MAX_SCSI_DEVICE_CODE) ? \
-	"Unknown" : scsi_device_types[n]
-
 #if 0
 static int xmargin=8;
 static int amargin=60;
@@ -389,7 +385,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
 	   time anyway (the scsi layer's inquiries will show that info) */
 	if (hostno != -1)
 		printk("cciss%d: %s device c%db%dt%dl%d added.\n",
-			ctlr, DEVICETYPE(sd->devtype), hostno,
+			ctlr, scsi_device_type(sd->devtype), hostno,
 			sd->bus, sd->target, sd->lun);
 	return 0;
 }
@@ -407,7 +403,7 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
 		ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
 	ccissscsi[ctlr].ndevices--;
 	printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
-		ctlr, DEVICETYPE(sd.devtype), hostno,
+		ctlr, scsi_device_type(sd.devtype), hostno,
 		sd.bus, sd.target, sd.lun);
 }
 
@@ -458,7 +454,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 		if (found == 0) { /* device no longer present. */
 			changes++;
 			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
-				ctlr, DEVICETYPE(csd->devtype), hostno,
+				ctlr, scsi_device_type(csd->devtype), hostno,
 				csd->bus, csd->target, csd->lun); */
 			cciss_scsi_remove_entry(ctlr, hostno, i);
 			/* note, i not incremented */
@@ -468,7 +464,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 			printk("cciss%d: device c%db%dt%dl%d type changed "
 				"(device type now %s).\n",
 				ctlr, hostno, csd->bus, csd->target, csd->lun,
-				DEVICETYPE(csd->devtype));
+				scsi_device_type(csd->devtype));
 			csd->devtype = sd[j].devtype;
 			i++;	/* so just move along. */
 		} else 		/* device is same as it ever was, */
@@ -1098,7 +1094,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
 				printk(KERN_INFO "cciss%d: %s ignored, "
 					"too many devices.\n", cntl_num,
-					DEVICETYPE(devtype));
+					scsi_device_type(devtype));
 				break;
 			}
 			memcpy(&currentsd[ncurrent].scsi3addr[0],
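
(Aside, not part of the commit: the cciss hunks above replace the driver-local DEVICETYPE() macro with the SCSI midlayer helper scsi_device_type(), which maps a peripheral device-type code to a printable name and falls back to "Unknown" for out-of-range values. A minimal illustrative call, with placeholder variable names:)

	/* illustrative only - scsi_device_type() is provided by the SCSI
	 * midlayer (<scsi/scsi.h> in this kernel generation) */
	printk(KERN_INFO "cciss%d: found %s device\n",
	       ctlr, scsi_device_type(devtype));
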
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 3d4261c39f16..40535036e893 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -40,11 +40,13 @@ static int
 cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 {
 	int err = -EINVAL;
+	int cipher_len;
+	int mode_len;
 	char cms[LO_NAME_SIZE];			/* cipher-mode string */
 	char *cipher;
 	char *mode;
 	char *cmsp = cms;			/* c-m string pointer */
-	struct crypto_tfm *tfm = NULL;
+	struct crypto_blkcipher *tfm;
 
 	/* encryption breaks for non sector aligned offsets */
 
@@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 
 	strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
 	cms[LO_NAME_SIZE - 1] = 0;
-	cipher = strsep(&cmsp, "-");
-	mode = strsep(&cmsp, "-");
-
-	if (mode == NULL || strcmp(mode, "cbc") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
-				       CRYPTO_TFM_REQ_MAY_SLEEP);
-	else if (strcmp(mode, "ecb") == 0)
-		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
-				       CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (tfm == NULL)
+
+	cipher = cmsp;
+	cipher_len = strcspn(cmsp, "-");
+
+	mode = cmsp + cipher_len;
+	mode_len = 0;
+	if (*mode) {
+		mode++;
+		mode_len = strcspn(mode, "-");
+	}
+
+	if (!mode_len) {
+		mode = "cbc";
+		mode_len = 3;
+	}
+
+	if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
 		return -EINVAL;
 
-	err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
-					   info->lo_encrypt_key_size);
+	memmove(cms, mode, mode_len);
+	cmsp = cms + mode_len;
+	*cmsp++ = '(';
+	memcpy(cmsp, info->lo_crypt_name, cipher_len);
+	cmsp += cipher_len;
+	*cmsp++ = ')';
+	*cmsp = 0;
+
+	tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
+				      info->lo_encrypt_key_size);
 
 	if (err != 0)
 		goto out_free_tfm;
@@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
 	return 0;
 
  out_free_tfm:
-	crypto_free_tfm(tfm);
+	crypto_free_blkcipher(tfm);
 
  out:
 	return err;
 }
 
 
-typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
+typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
 			struct scatterlist *sg_out,
 			struct scatterlist *sg_in,
 			unsigned int nsg);
 
-
-static int
-cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
-			struct page *raw_page, unsigned raw_off,
-			struct page *loop_page, unsigned loop_off,
-			int size, sector_t IV)
-{
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
-	struct scatterlist sg_out = { NULL, };
-	struct scatterlist sg_in = { NULL, };
-
-	encdec_ecb_t encdecfunc;
-	struct page *in_page, *out_page;
-	unsigned in_offs, out_offs;
-
-	if (cmd == READ) {
-		in_page = raw_page;
-		in_offs = raw_off;
-		out_page = loop_page;
-		out_offs = loop_off;
-		encdecfunc = tfm->crt_u.cipher.cit_decrypt;
-	} else {
-		in_page = loop_page;
-		in_offs = loop_off;
-		out_page = raw_page;
-		out_offs = raw_off;
-		encdecfunc = tfm->crt_u.cipher.cit_encrypt;
-	}
-
-	while (size > 0) {
-		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
-
-		sg_in.page = in_page;
-		sg_in.offset = in_offs;
-		sg_in.length = sz;
-
-		sg_out.page = out_page;
-		sg_out.offset = out_offs;
-		sg_out.length = sz;
-
-		encdecfunc(tfm, &sg_out, &sg_in, sz);
-
-		size -= sz;
-		in_offs += sz;
-		out_offs += sz;
-	}
-
-	return 0;
-}
-
-typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
-			struct scatterlist *sg_out,
-			struct scatterlist *sg_in,
-			unsigned int nsg, u8 *iv);
-
 static int
-cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
+cryptoloop_transfer(struct loop_device *lo, int cmd,
 			struct page *raw_page, unsigned raw_off,
 			struct page *loop_page, unsigned loop_off,
 			int size, sector_t IV)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
+	struct blkcipher_desc desc = {
+		.tfm = tfm,
+		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+	};
 	struct scatterlist sg_out = { NULL, };
 	struct scatterlist sg_in = { NULL, };
 
 	encdec_cbc_t encdecfunc;
 	struct page *in_page, *out_page;
 	unsigned in_offs, out_offs;
+	int err;
 
 	if (cmd == READ) {
 		in_page = raw_page;
 		in_offs = raw_off;
 		out_page = loop_page;
 		out_offs = loop_off;
-		encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
+		encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
 	} else {
 		in_page = loop_page;
 		in_offs = loop_off;
 		out_page = raw_page;
 		out_offs = raw_off;
-		encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
+		encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
 	}
 
 	while (size > 0) {
@@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
 		sg_out.offset = out_offs;
 		sg_out.length = sz;
 
-		encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
+		desc.info = iv;
+		err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+		if (err)
+			return err;
 
 		IV++;
 		size -= sz;
@@ -195,32 +169,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
 }
 
 static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
-		    struct page *raw_page, unsigned raw_off,
-		    struct page *loop_page, unsigned loop_off,
-		    int size, sector_t IV)
-{
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
-	if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB)
-	{
-		lo->transfer = cryptoloop_transfer_ecb;
-		return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
-					       loop_page, loop_off, size, IV);
-	}
-	if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC)
-	{
-		lo->transfer = cryptoloop_transfer_cbc;
-		return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
-					       loop_page, loop_off, size, IV);
-	}
-
-	/* This is not supposed to happen */
-
-	printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
-	return -EINVAL;
-}
-
-static int
 cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 {
 	return -EINVAL;
@@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
 	if (tfm != NULL) {
-		crypto_free_tfm(tfm);
+		crypto_free_blkcipher(tfm);
 		lo->key_data = NULL;
 		return 0;
 	}
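
(Aside, not part of the commit: the cryptoloop conversion above moves from the old crypto_alloc_tfm()/cit_* cipher interface to the 2.6.19-era block cipher API. A hedged sketch of that API as the new code uses it - the helper name and the "cbc(aes)" algorithm string are illustrative assumptions, and key/IV handling is simplified:)

	/* Sketch only: mirrors the blkcipher calls made by the new cryptoloop code. */
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int example_cbc_crypt(const u8 *key, unsigned int keylen, u8 *iv,
				     struct scatterlist *sg_out,
				     struct scatterlist *sg_in,
				     unsigned int nbytes)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		int err;

		/* algorithm string is built as "mode(cipher)", e.g. "cbc(aes)" */
		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_blkcipher_setkey(tfm, key, keylen);
		if (!err) {
			desc.tfm   = tfm;
			desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
			desc.info  = iv;	/* per-sector IV, as in the patch */
			err = crypto_blkcipher_crt(tfm)->encrypt(&desc, sg_out,
								 sg_in, nbytes);
		}

		crypto_free_blkcipher(tfm);
		return err;
	}
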
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c40e487d9f5c..52ea94b891f5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -495,6 +495,21 @@ config LEGACY_PTY_COUNT
 	  When not in use, each legacy PTY occupies 12 bytes on 32-bit
 	  architectures and 24 bytes on 64-bit architectures.
 
+config BRIQ_PANEL
+	tristate 'Total Impact briQ front panel driver'
+	depends on PPC_CHRP
+	---help---
+	  The briQ is a small footprint CHRP computer with a frontpanel VFD, a
+	  tristate led and two switches. It is the size of a CDROM drive.
+
+	  If you have such one and want anything showing on the VFD then you
+	  must answer Y here.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called briq_panel.
+
+	  It's safe to say N here.
+
 config PRINTER
 	tristate "Parallel printer support"
 	depends on PARPORT
@@ -596,6 +611,13 @@ config HVC_CONSOLE
 	  console. This driver allows each pSeries partition to have a console
 	  which is accessed via the HMC.
 
+config HVC_ISERIES
+	bool "iSeries Hypervisor Virtual Console support"
+	depends on PPC_ISERIES && !VIOCONS
+	select HVC_DRIVER
+	help
+	  iSeries machines support a hypervisor virtual console.
+
 config HVC_RTAS
 	bool "IBM RTAS Console support"
 	depends on PPC_RTAS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 6e0f4469d8bb..8c6dfc621520 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
 obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_VIOCONS) += viocons.o
 obj-$(CONFIG_VIOTAPE)		+= viotape.o
 obj-$(CONFIG_HVCS)		+= hvcs.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
+obj-$(CONFIG_BRIQ_PANEL)	+= briq_panel.o
 
 obj-$(CONFIG_PRINTER)		+= lp.o
 obj-$(CONFIG_TIPAR)		+= tipar.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 3c623b67ea1c..8b3317fd46c9 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -117,7 +117,7 @@ struct agp_bridge_driver {
 };
 
 struct agp_bridge_data {
-	struct agp_version *version;
+	const struct agp_version *version;
 	struct agp_bridge_driver *driver;
 	struct vm_operations_struct *vm_ops;
 	void *previous_size;
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 509adc403250..d59e037ddd12 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -44,7 +44,7 @@
  * past 0.99 at all due to some boolean logic error. */
 #define AGPGART_VERSION_MAJOR 0
 #define AGPGART_VERSION_MINOR 101
-static struct agp_version agp_current_version =
+static const struct agp_version agp_current_version =
 {
 	.major = AGPGART_VERSION_MAJOR,
 	.minor = AGPGART_VERSION_MINOR,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index b788b0a3bbf3..30f730ff81c1 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -337,13 +337,6 @@ static struct agp_bridge_driver efficeon_driver = {
 	.agp_destroy_page	= agp_generic_destroy_page,
 };
 
-
-static int agp_efficeon_resume(struct pci_dev *pdev)
-{
-	printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
-	return efficeon_configure();
-}
-
 static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
 					const struct pci_device_id *ent)
 {
@@ -414,11 +407,18 @@ static void __devexit agp_efficeon_remove(struct pci_dev *pdev)
 	agp_put_bridge(bridge);
 }
 
+#ifdef CONFIG_PM
 static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
 {
 	return 0;
 }
 
+static int agp_efficeon_resume(struct pci_dev *pdev)
+{
+	printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
+	return efficeon_configure();
+}
+#endif
 
 static struct pci_device_id agp_efficeon_pci_table[] = {
 	{
@@ -439,8 +439,10 @@ static struct pci_driver agp_efficeon_pci_driver = {
 	.id_table	= agp_efficeon_pci_table,
 	.probe		= agp_efficeon_probe,
 	.remove		= agp_efficeon_remove,
+#ifdef CONFIG_PM
 	.suspend	= agp_efficeon_suspend,
 	.resume		= agp_efficeon_resume,
+#endif
 };
 
 static int __init agp_efficeon_init(void)
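
(Aside, not part of the commit: the efficeon hunks above - and the intel-agp hunks further down - apply the usual CONFIG_PM pattern, so the suspend/resume callbacks and the matching pci_driver fields compile out together when power management is disabled. A rough sketch with hypothetical driver names:)

	#ifdef CONFIG_PM
	static int example_agp_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		return 0;
	}

	static int example_agp_resume(struct pci_dev *pdev)
	{
		return example_agp_configure();	/* hypothetical chipset reprogramming hook */
	}
	#endif

	static struct pci_driver example_agp_pci_driver = {
		.name		= "example-agp",
		.probe		= example_agp_probe,		/* hypothetical */
		.remove		= example_agp_remove,		/* hypothetical */
	#ifdef CONFIG_PM
		.suspend	= example_agp_suspend,
		.resume		= example_agp_resume,
	#endif
	};
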
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index d9c5a9142ad1..0f2ed2aa2d81 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -151,35 +151,12 @@ static void agp_add_seg_to_client(struct agp_client *client,
 	client->segments = seg;
 }
 
-/* Originally taken from linux/mm/mmap.c from the array
- * protection_map.
- * The original really should be exported to modules, or
- * some routine which does the conversion for you
- */
-
-static const pgprot_t my_protect_map[16] =
-{
-	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
-};
-
 static pgprot_t agp_convert_mmap_flags(int prot)
 {
-#define _trans(x,bit1,bit2) \
-((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
-
 	unsigned long prot_bits;
-	pgprot_t temp;
-
-	prot_bits = _trans(prot, PROT_READ, VM_READ) |
-		_trans(prot, PROT_WRITE, VM_WRITE) |
-		_trans(prot, PROT_EXEC, VM_EXEC);
-
-	prot_bits |= VM_SHARED;
 
-	temp = my_protect_map[prot_bits & 0x0000000f];
-
-	return temp;
+	prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
+	return vm_get_page_prot(prot_bits);
 }
 
 static int agp_create_segment(struct agp_client *client, struct agp_region *region)
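
(Aside, not part of the commit: the frontend.c hunk above drops agpgart's private copy of the mmap protection table in favour of the generic mm helpers. A hedged sketch of the resulting conversion - note that calc_vm_prot_bits() took a single argument in this kernel generation, and the helper name here is illustrative:)

	#include <linux/mm.h>
	#include <linux/mman.h>

	/* Map mmap() PROT_* flags to a shared page protection, as the
	 * rewritten agp_convert_mmap_flags() now does. */
	static pgprot_t example_shared_prot(int prot)
	{
		unsigned long vm_flags = calc_vm_prot_bits(prot) | VM_SHARED;

		return vm_get_page_prot(vm_flags);
	}
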
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index cc5ea347a8a7..0dcdb363923f 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -568,25 +568,34 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 		goto done;
 
+	} else if (*requested_mode & AGPSTAT3_4X) {
+		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+		*bridge_agpstat |= AGPSTAT3_4X;
+		goto done;
+
 	} else {
 
 		/*
-		 * If we didn't specify AGPx8, we can only do x4.
-		 * If the hardware can't do x4, we're up shit creek, and never
-		 * should have got this far.
+		 * If we didn't specify an AGP mode, we see if both
+		 * the graphics card, and the bridge can do x8, and use if so.
+		 * If not, we fall back to x4 mode.
 		 */
-		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
-		if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X))
-			*bridge_agpstat |= AGPSTAT3_4X;
-		else {
-			printk(KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
-				"[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n",
-				origbridge, origvga, *bridge_agpstat, *vga_agpstat);
-			if (!(*bridge_agpstat & AGPSTAT3_4X))
-				printk(KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
-			if (!(*vga_agpstat & AGPSTAT3_4X))
-				printk(KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
-			return;
+		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
+			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode supported by bridge & card (x8).\n");
+			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+		} else {
+			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+				printk("bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n", *bridge_agpstat, origbridge);
+				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+				*bridge_agpstat |= AGPSTAT3_4X;
+			}
+			if (!(*vga_agpstat & AGPSTAT3_8X)) {
+				printk("graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n", *vga_agpstat, origvga);
+				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+				*vga_agpstat |= AGPSTAT3_4X;
+			}
 		}
 	}
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 61ac3809f997..d1ede7db5a12 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -2,14 +2,6 @@
  * Intel AGPGART routines.
  */
 
-/*
- * Intel(R) 855GM/852GM and 865G support added by David Dawes
- * <dawes@tungstengraphics.com>.
- *
- * Intel(R) 915G/915GM support added by Alan Hourihane
- * <alanh@tungstengraphics.com>.
- */
-
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/init.h>
@@ -17,6 +9,21 @@
 #include <linux/agp_backend.h>
 #include "agp.h"
 
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
+#define PCI_DEVICE_ID_INTEL_82946GZ_IG      0x2972
+#define PCI_DEVICE_ID_INTEL_82965G_1_HB     0x2980
+#define PCI_DEVICE_ID_INTEL_82965G_1_IG     0x2982
+#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990
+#define PCI_DEVICE_ID_INTEL_82965Q_IG       0x2992
+#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0
+#define PCI_DEVICE_ID_INTEL_82965G_IG       0x29A2
+
+#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
+                 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
+                 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+                 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
+
+
 /* Intel 815 register */
 #define INTEL_815_APCONT	0x51
 #define INTEL_815_ATTBASE_MASK	~0x1FFFFFFF
@@ -40,6 +47,8 @@
 #define I915_GMCH_GMS_STOLEN_48M	(0x6 << 4)
 #define I915_GMCH_GMS_STOLEN_64M	(0x7 << 4)
 
+/* Intel 965G registers */
+#define I965_MSAC 0x62
 
 /* Intel 7505 registers */
 #define INTEL_I7505_APSIZE	0x74
@@ -354,6 +363,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
 	/* The 64M mode still requires a 128k gatt */
 	{64, 16384, 5},
 	{256, 65536, 6},
+	{512, 131072, 7},
 };
 
 static struct _intel_i830_private {
@@ -377,7 +387,11 @@ static void intel_i830_init_gtt_entries(void)
 	/* We obtain the size of the GTT, which is also stored (for some
 	 * reason) at the top of stolen memory. Then we add 4KB to that
 	 * for the video BIOS popup, which is also stored in there. */
-	size = agp_bridge->driver->fetch_size() + 4;
+
+	if (IS_I965)
+		size = 512 + 4;
+	else
+		size = agp_bridge->driver->fetch_size() + 4;
 
 	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
 	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
@@ -423,7 +437,7 @@ static void intel_i830_init_gtt_entries(void)
 		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
-		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB)
+		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || IS_I965 )
 			gtt_entries = MB(48) - KB(size);
 		else
 			gtt_entries = 0;
@@ -433,7 +447,7 @@ static void intel_i830_init_gtt_entries(void)
 		if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
 		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
-		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB)
+		    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || IS_I965)
 			gtt_entries = MB(64) - KB(size);
 		else
 			gtt_entries = 0;
@@ -791,6 +805,77 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 
 	return 0;
 }
+static int intel_i965_fetch_size(void)
+{
+	struct aper_size_info_fixed *values;
+	u32 offset = 0;
+	u8 temp;
+
+#define I965_512MB_ADDRESS_MASK (3<<1)
+
+	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+
+	pci_read_config_byte(intel_i830_private.i830_dev, I965_MSAC, &temp);
+	temp &= I965_512MB_ADDRESS_MASK;
+	switch (temp) {
+	case 0x00:
+		offset = 0; /* 128MB */
+		break;
+	case 0x06:
+		offset = 3; /* 512MB */
+		break;
+	default:
+	case 0x02:
+		offset = 2; /* 256MB */
+		break;
+	}
+
+	agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
+
+	return values[offset].size;
+}
+
+/* The intel i965 automatically initializes the agp aperture during POST.
++ * Use the memory already set aside for in the GTT.
++ */
+static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
+{
+	int page_order;
+	struct aper_size_info_fixed *size;
+	int num_entries;
+	u32 temp;
+
+	size = agp_bridge->current_size;
+	page_order = size->page_order;
+	num_entries = size->num_entries;
+	agp_bridge->gatt_table_real = NULL;
+
+	pci_read_config_dword(intel_i830_private.i830_dev, I915_MMADDR, &temp);
+
+	temp &= 0xfff00000;
+	intel_i830_private.gtt = ioremap((temp + (512 * 1024)) , 512 * 1024);
+
+	if (!intel_i830_private.gtt)
+		return -ENOMEM;
+
+
+	intel_i830_private.registers = ioremap(temp,128 * 4096);
+	if (!intel_i830_private.registers)
+		return -ENOMEM;
+
+	temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
+	global_cache_flush();	/* FIXME: ? */
+
+	/* we have to call this as early as possible after the MMIO base address is known */
+	intel_i830_init_gtt_entries();
+
+	agp_bridge->gatt_table = NULL;
+
+	agp_bridge->gatt_bus_addr = temp;
+
+	return 0;
+}
+
 
 static int intel_fetch_size(void)
 {
@@ -1307,7 +1392,7 @@ static struct agp_bridge_driver intel_830_driver = {
 	.owner			= THIS_MODULE,
 	.aperture_sizes		= intel_i830_sizes,
 	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 3,
+	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= TRUE,
 	.configure		= intel_i830_configure,
 	.fetch_size		= intel_i830_fetch_size,
@@ -1469,7 +1554,7 @@ static struct agp_bridge_driver intel_915_driver = {
 	.owner			= THIS_MODULE,
 	.aperture_sizes		= intel_i830_sizes,
 	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 3,
+	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= TRUE,
 	.configure		= intel_i915_configure,
 	.fetch_size		= intel_i915_fetch_size,
@@ -1489,6 +1574,29 @@ static struct agp_bridge_driver intel_915_driver = {
 	.agp_destroy_page	= agp_generic_destroy_page,
 };
 
+static struct agp_bridge_driver intel_i965_driver = {
+	.owner			= THIS_MODULE,
+	.aperture_sizes		= intel_i830_sizes,
+	.size_type		= FIXED_APER_SIZE,
+	.num_aperture_sizes	= 4,
+	.needs_scratch_page	= TRUE,
+	.configure		= intel_i915_configure,
+	.fetch_size		= intel_i965_fetch_size,
+	.cleanup		= intel_i915_cleanup,
+	.tlb_flush		= intel_i810_tlbflush,
+	.mask_memory		= intel_i810_mask_memory,
+	.masks			= intel_i810_masks,
+	.agp_enable		= intel_i810_agp_enable,
+	.cache_flush		= global_cache_flush,
+	.create_gatt_table	= intel_i965_create_gatt_table,
+	.free_gatt_table	= intel_i830_free_gatt_table,
+	.insert_memory		= intel_i915_insert_entries,
+	.remove_memory		= intel_i915_remove_entries,
+	.alloc_by_type		= intel_i830_alloc_by_type,
+	.free_by_type		= intel_i810_free_by_type,
+	.agp_alloc_page		= agp_generic_alloc_page,
+	.agp_destroy_page	= agp_generic_destroy_page,
+};
 
 static struct agp_bridge_driver intel_7505_driver = {
 	.owner			= THIS_MODULE,
@@ -1684,6 +1792,35 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		bridge->driver = &intel_845_driver;
 		name = "945GM";
 		break;
+	case PCI_DEVICE_ID_INTEL_82946GZ_HB:
+		if (find_i830(PCI_DEVICE_ID_INTEL_82946GZ_IG))
+			bridge->driver = &intel_i965_driver;
+		else
+			bridge->driver = &intel_845_driver;
+		name = "946GZ";
+		break;
+	case PCI_DEVICE_ID_INTEL_82965G_1_HB:
+		if (find_i830(PCI_DEVICE_ID_INTEL_82965G_1_IG))
+			bridge->driver = &intel_i965_driver;
+		else
+			bridge->driver = &intel_845_driver;
+		name = "965G";
+		break;
+	case PCI_DEVICE_ID_INTEL_82965Q_HB:
+		if (find_i830(PCI_DEVICE_ID_INTEL_82965Q_IG))
+			bridge->driver = &intel_i965_driver;
+		else
+			bridge->driver = &intel_845_driver;
+		name = "965Q";
+		break;
+	case PCI_DEVICE_ID_INTEL_82965G_HB:
+		if (find_i830(PCI_DEVICE_ID_INTEL_82965G_IG))
+			bridge->driver = &intel_i965_driver;
+		else
+			bridge->driver = &intel_845_driver;
+		name = "965G";
+		break;
+
 	case PCI_DEVICE_ID_INTEL_7505_0:
 		bridge->driver = &intel_7505_driver;
 		name = "E7505";
@@ -1766,6 +1903,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 	agp_put_bridge(bridge);
 }
 
+#ifdef CONFIG_PM
 static int agp_intel_resume(struct pci_dev *pdev)
 {
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
@@ -1786,9 +1924,12 @@ static int agp_intel_resume(struct pci_dev *pdev)
 		intel_i830_configure();
 	else if (bridge->driver == &intel_810_driver)
 		intel_i810_configure();
+	else if (bridge->driver == &intel_i965_driver)
+		intel_i915_configure();
 
 	return 0;
 }
+#endif
 
 static struct pci_device_id agp_intel_pci_table[] = {
 #define ID(x) \
@@ -1825,6 +1966,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
+	ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
+	ID(PCI_DEVICE_ID_INTEL_82965G_HB),
 	{ }
 };
 
@@ -1835,7 +1980,9 @@ static struct pci_driver agp_intel_pci_driver = {
1835 .id_table = agp_intel_pci_table, 1980 .id_table = agp_intel_pci_table,
1836 .probe = agp_intel_probe, 1981 .probe = agp_intel_probe,
1837 .remove = __devexit_p(agp_intel_remove), 1982 .remove = __devexit_p(agp_intel_remove),
1983#ifdef CONFIG_PM
1838 .resume = agp_intel_resume, 1984 .resume = agp_intel_resume,
1985#endif
1839}; 1986};
1840 1987
1841static int __init agp_intel_init(void) 1988static int __init agp_intel_init(void)
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 1de1b12043bf..91b71e750ee1 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -601,8 +601,8 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
601 uninorth_node = of_find_node_by_name(NULL, "u3"); 601 uninorth_node = of_find_node_by_name(NULL, "u3");
602 } 602 }
603 if (uninorth_node) { 603 if (uninorth_node) {
604 int *revprop = (int *) 604 const int *revprop = get_property(uninorth_node,
605 get_property(uninorth_node, "device-rev", NULL); 605 "device-rev", NULL);
606 if (revprop != NULL) 606 if (revprop != NULL)
607 uninorth_rev = *revprop & 0x3f; 607 uninorth_rev = *revprop & 0x3f;
608 of_node_put(uninorth_node); 608 of_node_put(uninorth_node);
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index b8ec25d17478..c149ac9ce9a7 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -9,7 +9,7 @@
9#include <linux/agp_backend.h> 9#include <linux/agp_backend.h>
10#include "agp.h" 10#include "agp.h"
11 11
12static struct pci_device_id agp_via_pci_table[]; 12static const struct pci_device_id agp_via_pci_table[];
13 13
14#define VIA_GARTCTRL 0x80 14#define VIA_GARTCTRL 0x80
15#define VIA_APSIZE 0x84 15#define VIA_APSIZE 0x84
@@ -485,7 +485,7 @@ static int agp_via_resume(struct pci_dev *pdev)
485#endif /* CONFIG_PM */ 485#endif /* CONFIG_PM */
486 486
487/* must be the same order as name table above */ 487/* must be the same order as name table above */
488static struct pci_device_id agp_via_pci_table[] = { 488static const struct pci_device_id agp_via_pci_table[] = {
489#define ID(x) \ 489#define ID(x) \
490 { \ 490 { \
491 .class = (PCI_CLASS_BRIDGE_HOST << 8), \ 491 .class = (PCI_CLASS_BRIDGE_HOST << 8), \
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
new file mode 100644
index 000000000000..b8c22255f6ad
--- /dev/null
+++ b/drivers/char/briq_panel.c
@@ -0,0 +1,271 @@
1/*
2 * Drivers for the Total Impact PPC based computer "BRIQ"
3 * by Dr. Karsten Jeppesen
4 *
5 */
6
7#include <linux/module.h>
8
9#include <linux/types.h>
10#include <linux/errno.h>
11#include <linux/sched.h>
12#include <linux/tty.h>
13#include <linux/timer.h>
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/wait.h>
17#include <linux/string.h>
18#include <linux/slab.h>
19#include <linux/ioport.h>
20#include <linux/delay.h>
21#include <linux/miscdevice.h>
22#include <linux/fs.h>
23#include <linux/mm.h>
24#include <linux/init.h>
25
26#include <asm/uaccess.h>
27#include <asm/io.h>
28#include <asm/prom.h>
29
30#define BRIQ_PANEL_MINOR 156
31#define BRIQ_PANEL_VFD_IOPORT 0x0390
32#define BRIQ_PANEL_LED_IOPORT 0x0398
33#define BRIQ_PANEL_VER "1.1 (04/20/2002)"
34#define BRIQ_PANEL_MSG0 "Loading Linux"
35
36static int vfd_is_open;
37static unsigned char vfd[40];
38static int vfd_cursor;
39static unsigned char ledpb, led;
40
41static void update_vfd(void)
42{
43 int i;
44
45 /* cursor home */
46 outb(0x02, BRIQ_PANEL_VFD_IOPORT);
47 for (i=0; i<20; i++)
48 outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
49
50 /* cursor to next line */
51 outb(0xc0, BRIQ_PANEL_VFD_IOPORT);
52 for (i=20; i<40; i++)
53 outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
54
55}
56
57static void set_led(char state)
58{
59 if (state == 'R')
60 led = 0x01;
61 else if (state == 'G')
62 led = 0x02;
63 else if (state == 'Y')
64 led = 0x03;
65 else if (state == 'X')
66 led = 0x00;
67 outb(led, BRIQ_PANEL_LED_IOPORT);
68}
69
70static int briq_panel_open(struct inode *ino, struct file *filep)
71{
72 /* enforce single access */
73 if (vfd_is_open)
74 return -EBUSY;
75 vfd_is_open = 1;
76
77 return 0;
78}
79
80static int briq_panel_release(struct inode *ino, struct file *filep)
81{
82 if (!vfd_is_open)
83 return -ENODEV;
84
85 vfd_is_open = 0;
86
87 return 0;
88}
89
90static ssize_t briq_panel_read(struct file *file, char __user *buf, size_t count,
91 loff_t *ppos)
92{
93 unsigned short c;
94 unsigned char cp;
95
96#if 0 /* Can't seek (pread) on this device */
97 if (ppos != &file->f_pos)
98 return -ESPIPE;
99#endif
100
101 if (!vfd_is_open)
102 return -ENODEV;
103
104 c = (inb(BRIQ_PANEL_LED_IOPORT) & 0x000c) | (ledpb & 0x0003);
105 set_led(' ');
106 /* upper button released */
107 if ((!(ledpb & 0x0004)) && (c & 0x0004)) {
108 cp = ' ';
109 ledpb = c;
110 if (copy_to_user(buf, &cp, 1))
111 return -EFAULT;
112 return 1;
113 }
114 /* lower button released */
115 else if ((!(ledpb & 0x0008)) && (c & 0x0008)) {
116 cp = '\r';
117 ledpb = c;
118 if (copy_to_user(buf, &cp, 1))
119 return -EFAULT;
120 return 1;
121 } else {
122 ledpb = c;
123 return 0;
124 }
125}
126
127static void scroll_vfd( void )
128{
129 int i;
130
131 for (i=0; i<20; i++) {
132 vfd[i] = vfd[i+20];
133 vfd[i+20] = ' ';
134 }
135 vfd_cursor = 20;
136}
137
138static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_t len,
139 loff_t *ppos)
140{
141 size_t indx = len;
142 int i, esc = 0;
143
144#if 0 /* Can't seek (pwrite) on this device */
145 if (ppos != &file->f_pos)
146 return -ESPIPE;
147#endif
148
149 if (!vfd_is_open)
150 return -EBUSY;
151
152 for (;;) {
153 char c;
154 if (!indx)
155 break;
156 if (get_user(c, buf))
157 return -EFAULT;
158 if (esc) {
159 set_led(c);
160 esc = 0;
161 } else if (c == 27) {
162 esc = 1;
163 } else if (c == 12) {
164 /* do a form feed */
165 for (i=0; i<40; i++)
166 vfd[i] = ' ';
167 vfd_cursor = 0;
168 } else if (c == 10) {
169 if (vfd_cursor < 20)
170 vfd_cursor = 20;
171 else if (vfd_cursor < 40)
172 vfd_cursor = 40;
173 else if (vfd_cursor < 60)
174 vfd_cursor = 60;
175 if (vfd_cursor > 59)
176 scroll_vfd();
177 } else {
178 /* just a character */
179 if (vfd_cursor > 39)
180 scroll_vfd();
181 vfd[vfd_cursor++] = c;
182 }
183 indx--;
184 buf++;
185 }
186 update_vfd();
187
188 return len;
189}
190
191static struct file_operations briq_panel_fops = {
192 .owner = THIS_MODULE,
193 .read = briq_panel_read,
194 .write = briq_panel_write,
195 .open = briq_panel_open,
196 .release = briq_panel_release,
197};
198
199static struct miscdevice briq_panel_miscdev = {
200 BRIQ_PANEL_MINOR,
201 "briq_panel",
202 &briq_panel_fops
203};
204
205static int __init briq_panel_init(void)
206{
207 struct device_node *root = find_path_device("/");
208 const char *machine;
209 int i;
210
211 machine = get_property(root, "model", NULL);
212 if (!machine || strncmp(machine, "TotalImpact,BRIQ-1", 18) != 0)
213 return -ENODEV;
214
215 printk(KERN_INFO
216 "briq_panel: v%s Dr. Karsten Jeppesen (kj@totalimpact.com)\n",
217 BRIQ_PANEL_VER);
218
219 if (!request_region(BRIQ_PANEL_VFD_IOPORT, 4, "BRIQ Front Panel"))
220 return -EBUSY;
221
222 if (!request_region(BRIQ_PANEL_LED_IOPORT, 2, "BRIQ Front Panel")) {
223 release_region(BRIQ_PANEL_VFD_IOPORT, 4);
224 return -EBUSY;
225 }
226 ledpb = inb(BRIQ_PANEL_LED_IOPORT) & 0x000c;
227
228 if (misc_register(&briq_panel_miscdev) < 0) {
229 release_region(BRIQ_PANEL_VFD_IOPORT, 4);
230 release_region(BRIQ_PANEL_LED_IOPORT, 2);
231 return -EBUSY;
232 }
233
234 outb(0x38, BRIQ_PANEL_VFD_IOPORT); /* Function set */
235 outb(0x01, BRIQ_PANEL_VFD_IOPORT); /* Clear display */
236 outb(0x0c, BRIQ_PANEL_VFD_IOPORT); /* Display on */
237 outb(0x06, BRIQ_PANEL_VFD_IOPORT); /* Entry normal */
238 for (i=0; i<40; i++)
239 vfd[i]=' ';
240#ifndef MODULE
241 vfd[0] = 'L';
242 vfd[1] = 'o';
243 vfd[2] = 'a';
244 vfd[3] = 'd';
245 vfd[4] = 'i';
246 vfd[5] = 'n';
247 vfd[6] = 'g';
248 vfd[7] = ' ';
249 vfd[8] = '.';
250 vfd[9] = '.';
251 vfd[10] = '.';
252#endif /* !MODULE */
253
254 update_vfd();
255
256 return 0;
257}
258
259static void __exit briq_panel_exit(void)
260{
261 misc_deregister(&briq_panel_miscdev);
262 release_region(BRIQ_PANEL_VFD_IOPORT, 4);
263 release_region(BRIQ_PANEL_LED_IOPORT, 2);
264}
265
266module_init(briq_panel_init);
267module_exit(briq_panel_exit);
268
269MODULE_LICENSE("GPL");
270MODULE_AUTHOR("Karsten Jeppesen <karsten@jeppesens.com>");
271MODULE_DESCRIPTION("Driver for the Total Impact briQ front panel");
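The briq_panel write path above interprets a small in-band protocol: ESC (0x1b) followed by 'R', 'G', 'Y' or 'X' selects the LED colour, 0x0c clears the 2x20 VFD, 0x0a advances to the next display line (scrolling when needed), and any other byte is stored at the cursor. A minimal user-space sketch of that protocol, assuming the misc device appears as /dev/briq_panel (a guess based on the miscdevice name, not something this driver creates):

/* Hypothetical user-space example of the briq_panel escape protocol.
 * The /dev/briq_panel path is an assumption, not part of the driver. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/briq_panel", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "\033G", 2);		/* ESC 'G': LED green           */
	write(fd, "\014", 1);		/* 0x0c: clear both VFD lines   */
	write(fd, "Hello\nbriQ", 10);	/* text, 0x0a = next VFD line   */
	close(fd);
	return 0;
}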
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 613d67f1c7f0..a76d2c40dd5e 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -80,7 +80,8 @@ struct hvc_struct {
80 struct tty_struct *tty; 80 struct tty_struct *tty;
81 unsigned int count; 81 unsigned int count;
82 int do_wakeup; 82 int do_wakeup;
83 char outbuf[N_OUTBUF] __ALIGNED__; 83 char *outbuf;
84 int outbuf_size;
84 int n_outbuf; 85 int n_outbuf;
85 uint32_t vtermno; 86 uint32_t vtermno;
86 struct hv_ops *ops; 87 struct hv_ops *ops;
@@ -319,10 +320,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
319 struct kobject *kobjp; 320 struct kobject *kobjp;
320 321
321 /* Auto increments kobject reference if found. */ 322 /* Auto increments kobject reference if found. */
322 if (!(hp = hvc_get_by_index(tty->index))) { 323 if (!(hp = hvc_get_by_index(tty->index)))
323 printk(KERN_WARNING "hvc_console: tty open failed, no vty associated with tty.\n");
324 return -ENODEV; 324 return -ENODEV;
325 }
326 325
327 spin_lock_irqsave(&hp->lock, flags); 326 spin_lock_irqsave(&hp->lock, flags);
328 /* Check and then increment for fast path open. */ 327 /* Check and then increment for fast path open. */
@@ -505,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
505 if (hp->n_outbuf > 0) 504 if (hp->n_outbuf > 0)
506 hvc_push(hp); 505 hvc_push(hp);
507 506
508 while (count > 0 && (rsize = N_OUTBUF - hp->n_outbuf) > 0) { 507 while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) {
509 if (rsize > count) 508 if (rsize > count)
510 rsize = count; 509 rsize = count;
511 memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); 510 memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
@@ -538,7 +537,7 @@ static int hvc_write_room(struct tty_struct *tty)
538 if (!hp) 537 if (!hp)
539 return -1; 538 return -1;
540 539
541 return N_OUTBUF - hp->n_outbuf; 540 return hp->outbuf_size - hp->n_outbuf;
542} 541}
543 542
544static int hvc_chars_in_buffer(struct tty_struct *tty) 543static int hvc_chars_in_buffer(struct tty_struct *tty)
@@ -729,12 +728,13 @@ static struct kobj_type hvc_kobj_type = {
729}; 728};
730 729
731struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, 730struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
732 struct hv_ops *ops) 731 struct hv_ops *ops, int outbuf_size)
733{ 732{
734 struct hvc_struct *hp; 733 struct hvc_struct *hp;
735 int i; 734 int i;
736 735
737 hp = kmalloc(sizeof(*hp), GFP_KERNEL); 736 hp = kmalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
737 GFP_KERNEL);
738 if (!hp) 738 if (!hp)
739 return ERR_PTR(-ENOMEM); 739 return ERR_PTR(-ENOMEM);
740 740
@@ -743,6 +743,8 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq,
743 hp->vtermno = vtermno; 743 hp->vtermno = vtermno;
744 hp->irq = irq; 744 hp->irq = irq;
745 hp->ops = ops; 745 hp->ops = ops;
746 hp->outbuf_size = outbuf_size;
747 hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
746 748
747 kobject_init(&hp->kobj); 749 kobject_init(&hp->kobj);
748 hp->kobj.ktype = &hvc_kobj_type; 750 hp->kobj.ktype = &hvc_kobj_type;
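The hvc_alloc() change above replaces the fixed N_OUTBUF array with a per-console buffer carved out of the same kmalloc() as the hvc_struct itself, placed long-aligned past the end of the structure. A minimal sketch of that single-allocation pattern, with illustrative struct and field names only:

/* Sketch of the one-kmalloc "struct plus trailing buffer" layout used
 * by hvc_alloc() above; the names here are made up for illustration. */
#include <linux/kernel.h>
#include <linux/slab.h>

struct outq {
	int	size;
	int	used;
	char	*buf;			/* points just past the struct */
};

static struct outq *outq_alloc(int size)
{
	struct outq *q;

	q = kmalloc(ALIGN(sizeof(*q), sizeof(long)) + size, GFP_KERNEL);
	if (!q)
		return NULL;
	q->size = size;
	q->used = 0;
	q->buf  = (char *)q + ALIGN(sizeof(*q), sizeof(long));
	return q;
}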
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 96b7401319c1..8c59818050e6 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -56,7 +56,7 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
56 56
57/* register a vterm for hvc tty operation (module_init or hotplug add) */ 57/* register a vterm for hvc tty operation (module_init or hotplug add) */
58extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, 58extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq,
59 struct hv_ops *ops); 59 struct hv_ops *ops, int outbuf_size);
60/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ 60/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */
61extern int __devexit hvc_remove(struct hvc_struct *hp); 61extern int __devexit hvc_remove(struct hvc_struct *hp);
62 62
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
new file mode 100644
index 000000000000..4747729459c7
--- /dev/null
+++ b/drivers/char/hvc_iseries.c
@@ -0,0 +1,594 @@
1/*
2 * iSeries vio driver interface to hvc_console.c
3 *
4 * This code is based heavily on hvc_vio.c and viocons.c
5 *
6 * Copyright (C) 2006 Stephen Rothwell, IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <linux/types.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/console.h>
28
29#include <asm/hvconsole.h>
30#include <asm/vio.h>
31#include <asm/prom.h>
32#include <asm/iseries/vio.h>
33#include <asm/iseries/hv_call.h>
34#include <asm/iseries/hv_lp_config.h>
35#include <asm/iseries/hv_lp_event.h>
36
37#include "hvc_console.h"
38
39#define VTTY_PORTS 10
40
41static DEFINE_SPINLOCK(consolelock);
42static DEFINE_SPINLOCK(consoleloglock);
43
44static const char hvc_driver_name[] = "hvc_console";
45
46#define IN_BUF_SIZE 200
47
48/*
49 * Our port information.
50 */
51static struct port_info {
52 HvLpIndex lp;
53 u64 seq; /* sequence number of last HV send */
54 u64 ack; /* last ack from HV */
55 struct hvc_struct *hp;
56 int in_start;
57 int in_end;
58 unsigned char in_buf[IN_BUF_SIZE];
59} port_info[VTTY_PORTS] = {
60 [ 0 ... VTTY_PORTS - 1 ] = {
61 .lp = HvLpIndexInvalid
62 }
63};
64
65#define viochar_is_console(pi) ((pi) == &port_info[0])
66
67static struct vio_device_id hvc_driver_table[] __devinitdata = {
68 {"serial", "IBM,iSeries-vty"},
69 { "", "" }
70};
71MODULE_DEVICE_TABLE(vio, hvc_driver_table);
72
73static void hvlog(char *fmt, ...)
74{
75 int i;
76 unsigned long flags;
77 va_list args;
78 static char buf[256];
79
80 spin_lock_irqsave(&consoleloglock, flags);
81 va_start(args, fmt);
82 i = vscnprintf(buf, sizeof(buf) - 1, fmt, args);
83 va_end(args);
84 buf[i++] = '\r';
85 HvCall_writeLogBuffer(buf, i);
86 spin_unlock_irqrestore(&consoleloglock, flags);
87}
88
89/*
90 * Initialize the common fields in a charLpEvent
91 */
92static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp)
93{
94 struct HvLpEvent *hev = &viochar->event;
95
96 memset(viochar, 0, sizeof(struct viocharlpevent));
97
98 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
99 HV_LP_EVENT_INT;
100 hev->xType = HvLpEvent_Type_VirtualIo;
101 hev->xSubtype = viomajorsubtype_chario | viochardata;
102 hev->xSourceLp = HvLpConfig_getLpIndex();
103 hev->xTargetLp = lp;
104 hev->xSizeMinus1 = sizeof(struct viocharlpevent);
105 hev->xSourceInstanceId = viopath_sourceinst(lp);
106 hev->xTargetInstanceId = viopath_targetinst(lp);
107}
108
109static int get_chars(uint32_t vtermno, char *buf, int count)
110{
111 struct port_info *pi;
112 int n = 0;
113 unsigned long flags;
114
115 if (vtermno >= VTTY_PORTS)
116 return -EINVAL;
117 if (count == 0)
118 return 0;
119
120 pi = &port_info[vtermno];
121 spin_lock_irqsave(&consolelock, flags);
122
123 if (pi->in_end == 0)
124 goto done;
125
126 n = pi->in_end - pi->in_start;
127 if (n > count)
128 n = count;
129 memcpy(buf, &pi->in_buf[pi->in_start], n);
130 pi->in_start += n;
131 if (pi->in_start == pi->in_end) {
132 pi->in_start = 0;
133 pi->in_end = 0;
134 }
135done:
136 spin_unlock_irqrestore(&consolelock, flags);
137 return n;
138}
139
140static int put_chars(uint32_t vtermno, const char *buf, int count)
141{
142 struct viocharlpevent *viochar;
143 struct port_info *pi;
144 HvLpEvent_Rc hvrc;
145 unsigned long flags;
146 int sent = 0;
147
148 if (vtermno >= VTTY_PORTS)
149 return -EINVAL;
150
151 pi = &port_info[vtermno];
152
153 spin_lock_irqsave(&consolelock, flags);
154
155 if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) {
156 spin_lock_irqsave(&consoleloglock, flags);
157 HvCall_writeLogBuffer(buf, count);
158 spin_unlock_irqrestore(&consoleloglock, flags);
159 sent = count;
160 goto done;
161 }
162
163 viochar = vio_get_event_buffer(viomajorsubtype_chario);
164 if (viochar == NULL) {
165 hvlog("\n\rviocons: Can't get viochar buffer.");
166 goto done;
167 }
168
169 while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
170 int len;
171
172 len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count;
173
174 if (viochar_is_console(pi)) {
175 spin_lock_irqsave(&consoleloglock, flags);
176 HvCall_writeLogBuffer(buf, len);
177 spin_unlock_irqrestore(&consoleloglock, flags);
178 }
179
180 init_data_event(viochar, pi->lp);
181
182 viochar->len = len;
183 viochar->event.xCorrelationToken = pi->seq++;
184 viochar->event.xSizeMinus1 =
185 offsetof(struct viocharlpevent, data) + len;
186
187 memcpy(viochar->data, buf, len);
188
189 hvrc = HvCallEvent_signalLpEvent(&viochar->event);
190 if (hvrc)
191 hvlog("\n\rerror sending event! return code %d\n\r",
192 (int)hvrc);
193 sent += len;
194 count -= len;
195 buf += len;
196 }
197
198 vio_free_event_buffer(viomajorsubtype_chario, viochar);
199done:
200 spin_unlock_irqrestore(&consolelock, flags);
201 return sent;
202}
203
204static struct hv_ops hvc_get_put_ops = {
205 .get_chars = get_chars,
206 .put_chars = put_chars,
207};
208
209static int __devinit hvc_vio_probe(struct vio_dev *vdev,
210 const struct vio_device_id *id)
211{
212 struct hvc_struct *hp;
213 struct port_info *pi;
214
215 /* probed with invalid parameters. */
216 if (!vdev || !id)
217 return -EPERM;
218
219 if (vdev->unit_address >= VTTY_PORTS)
220 return -ENODEV;
221
222 pi = &port_info[vdev->unit_address];
223
224 hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
225 VIOCHAR_MAX_DATA);
226 if (IS_ERR(hp))
227 return PTR_ERR(hp);
228 pi->hp = hp;
229 dev_set_drvdata(&vdev->dev, pi);
230
231 return 0;
232}
233
234static int __devexit hvc_vio_remove(struct vio_dev *vdev)
235{
236 struct port_info *pi = dev_get_drvdata(&vdev->dev);
237 struct hvc_struct *hp = pi->hp;
238
239 return hvc_remove(hp);
240}
241
242static struct vio_driver hvc_vio_driver = {
243 .id_table = hvc_driver_table,
244 .probe = hvc_vio_probe,
245 .remove = hvc_vio_remove,
246 .driver = {
247 .name = hvc_driver_name,
248 .owner = THIS_MODULE,
249 }
250};
251
252static void hvc_open_event(struct HvLpEvent *event)
253{
254 unsigned long flags;
255 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
256 u8 port = cevent->virtual_device;
257 struct port_info *pi;
258 int reject = 0;
259
260 if (hvlpevent_is_ack(event)) {
261 if (port >= VTTY_PORTS)
262 return;
263
264 spin_lock_irqsave(&consolelock, flags);
265
266 pi = &port_info[port];
267 if (event->xRc == HvLpEvent_Rc_Good) {
268 pi->seq = pi->ack = 0;
269 /*
270 * This line allows connections from the primary
271 * partition but once one is connected from the
272 * primary partition nothing short of a reboot
273 * of linux will allow access from the hosting
274 * partition again without a required iSeries fix.
275 */
276 pi->lp = event->xTargetLp;
277 }
278
279 spin_unlock_irqrestore(&consolelock, flags);
280 if (event->xRc != HvLpEvent_Rc_Good)
281 printk(KERN_WARNING
282 "hvc: handle_open_event: event->xRc == (%d).\n",
283 event->xRc);
284
285 if (event->xCorrelationToken != 0) {
286 atomic_t *aptr= (atomic_t *)event->xCorrelationToken;
287 atomic_set(aptr, 1);
288 } else
289 printk(KERN_WARNING
290 "hvc: weird...got open ack without atomic\n");
291 return;
292 }
293
294 /* This had better require an ack, otherwise complain */
295 if (!hvlpevent_need_ack(event)) {
296 printk(KERN_WARNING "hvc: viocharopen without ack bit!\n");
297 return;
298 }
299
300 spin_lock_irqsave(&consolelock, flags);
301
302 /* Make sure this is a good virtual tty */
303 if (port >= VTTY_PORTS) {
304 event->xRc = HvLpEvent_Rc_SubtypeError;
305 cevent->subtype_result_code = viorc_openRejected;
306 /*
307 * Flag state here since we can't printk while holding
308 * the consolelock spinlock.
309 */
310 reject = 1;
311 } else {
312 pi = &port_info[port];
313 if ((pi->lp != HvLpIndexInvalid) &&
314 (pi->lp != event->xSourceLp)) {
315 /*
 316 * If this tty is already connected to a different
317 * partition, fail.
318 */
319 event->xRc = HvLpEvent_Rc_SubtypeError;
320 cevent->subtype_result_code = viorc_openRejected;
321 reject = 2;
322 } else {
323 pi->lp = event->xSourceLp;
324 event->xRc = HvLpEvent_Rc_Good;
325 cevent->subtype_result_code = viorc_good;
326 pi->seq = pi->ack = 0;
327 }
328 }
329
330 spin_unlock_irqrestore(&consolelock, flags);
331
332 if (reject == 1)
333 printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n");
334 else if (reject == 2)
335 printk(KERN_WARNING "hvc: open rejected: console in exclusive "
336 "use by another partition.\n");
337
338 /* Return the acknowledgement */
339 HvCallEvent_ackLpEvent(event);
340}
341
342/*
343 * Handle a close charLpEvent. This should ONLY be an Interrupt because the
344 * virtual console should never actually issue a close event to the hypervisor
345 * because the virtual console never goes away. A close event coming from the
346 * hypervisor simply means that there are no client consoles connected to the
347 * virtual console.
348 */
349static void hvc_close_event(struct HvLpEvent *event)
350{
351 unsigned long flags;
352 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
353 u8 port = cevent->virtual_device;
354
355 if (!hvlpevent_is_int(event)) {
356 printk(KERN_WARNING
 357 "hvc: got unexpected close acknowledgement\n");
358 return;
359 }
360
361 if (port >= VTTY_PORTS) {
362 printk(KERN_WARNING
363 "hvc: close message from invalid virtual device.\n");
364 return;
365 }
366
367 /* For closes, just mark the console partition invalid */
368 spin_lock_irqsave(&consolelock, flags);
369
370 if (port_info[port].lp == event->xSourceLp)
371 port_info[port].lp = HvLpIndexInvalid;
372
373 spin_unlock_irqrestore(&consolelock, flags);
374}
375
376static void hvc_data_event(struct HvLpEvent *event)
377{
378 unsigned long flags;
379 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
380 struct port_info *pi;
381 int n;
382 u8 port = cevent->virtual_device;
383
384 if (port >= VTTY_PORTS) {
385 printk(KERN_WARNING "hvc: data on invalid virtual device %d\n",
386 port);
387 return;
388 }
389 if (cevent->len == 0)
390 return;
391
392 /*
393 * Change 05/01/2003 - Ryan Arnold: If a partition other than
394 * the current exclusive partition tries to send us data
395 * events then just drop them on the floor because we don't
396 * want his stinking data. He isn't authorized to receive
397 * data because he wasn't the first one to get the console,
398 * therefore he shouldn't be allowed to send data either.
399 * This will work without an iSeries fix.
400 */
401 pi = &port_info[port];
402 if (pi->lp != event->xSourceLp)
403 return;
404
405 spin_lock_irqsave(&consolelock, flags);
406
407 n = IN_BUF_SIZE - pi->in_end;
408 if (n > cevent->len)
409 n = cevent->len;
410 if (n > 0) {
411 memcpy(&pi->in_buf[pi->in_end], cevent->data, n);
412 pi->in_end += n;
413 }
414 spin_unlock_irqrestore(&consolelock, flags);
415 if (n == 0)
416 printk(KERN_WARNING "hvc: input buffer overflow\n");
417}
418
419static void hvc_ack_event(struct HvLpEvent *event)
420{
421 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
422 unsigned long flags;
423 u8 port = cevent->virtual_device;
424
425 if (port >= VTTY_PORTS) {
426 printk(KERN_WARNING "hvc: data on invalid virtual device\n");
427 return;
428 }
429
430 spin_lock_irqsave(&consolelock, flags);
431 port_info[port].ack = event->xCorrelationToken;
432 spin_unlock_irqrestore(&consolelock, flags);
433}
434
435static void hvc_config_event(struct HvLpEvent *event)
436{
437 struct viocharlpevent *cevent = (struct viocharlpevent *)event;
438
439 if (cevent->data[0] == 0x01)
440 printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n",
441 cevent->data[1], cevent->data[2],
442 cevent->data[3], cevent->data[4]);
443 else
444 printk(KERN_WARNING "hvc: unknown config event\n");
445}
446
447static void hvc_handle_event(struct HvLpEvent *event)
448{
449 int charminor;
450
451 if (event == NULL)
452 return;
453
454 charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
455 switch (charminor) {
456 case viocharopen:
457 hvc_open_event(event);
458 break;
459 case viocharclose:
460 hvc_close_event(event);
461 break;
462 case viochardata:
463 hvc_data_event(event);
464 break;
465 case viocharack:
466 hvc_ack_event(event);
467 break;
468 case viocharconfig:
469 hvc_config_event(event);
470 break;
471 default:
472 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
473 event->xRc = HvLpEvent_Rc_InvalidSubtype;
474 HvCallEvent_ackLpEvent(event);
475 }
476 }
477}
478
479static int send_open(HvLpIndex remoteLp, void *sem)
480{
481 return HvCallEvent_signalLpEventFast(remoteLp,
482 HvLpEvent_Type_VirtualIo,
483 viomajorsubtype_chario | viocharopen,
484 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
485 viopath_sourceinst(remoteLp),
486 viopath_targetinst(remoteLp),
487 (u64)(unsigned long)sem, VIOVERSION << 16,
488 0, 0, 0, 0);
489}
490
491static int hvc_vio_init(void)
492{
493 atomic_t wait_flag;
494 int rc;
495
496 /* +2 for fudge */
497 rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
498 viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
499 if (rc)
500 printk(KERN_WARNING "hvc: error opening to primary %d\n", rc);
501
502 if (viopath_hostLp == HvLpIndexInvalid)
503 vio_set_hostlp();
504
505 /*
506 * And if the primary is not the same as the hosting LP, open to the
507 * hosting lp
508 */
509 if ((viopath_hostLp != HvLpIndexInvalid) &&
510 (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
511 printk(KERN_INFO "hvc: open path to hosting (%d)\n",
512 viopath_hostLp);
513 rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
514 VIOCHAR_WINDOW + 2); /* +2 for fudge */
515 if (rc)
516 printk(KERN_WARNING
517 "error opening to partition %d: %d\n",
518 viopath_hostLp, rc);
519 }
520
521 if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0)
522 printk(KERN_WARNING
 523 "hvc: error setting handler for console events!\n");
524
525 /*
526 * First, try to open the console to the hosting lp.
527 * Wait on a semaphore for the response.
528 */
529 atomic_set(&wait_flag, 0);
530 if ((viopath_isactive(viopath_hostLp)) &&
531 (send_open(viopath_hostLp, &wait_flag) == 0)) {
532 printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp);
533 while (atomic_read(&wait_flag) == 0)
534 mb();
535 atomic_set(&wait_flag, 0);
536 }
537
538 /*
539 * If we don't have an active console, try the primary
540 */
541 if ((!viopath_isactive(port_info[0].lp)) &&
542 (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) &&
543 (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) {
544 printk(KERN_INFO "hvc: opening console to primary partition\n");
545 while (atomic_read(&wait_flag) == 0)
546 mb();
547 }
548
549 /* Register as a vio device to receive callbacks */
550 rc = vio_register_driver(&hvc_vio_driver);
551
552 return rc;
553}
554module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
555
556static void hvc_vio_exit(void)
557{
558 vio_unregister_driver(&hvc_vio_driver);
559}
560module_exit(hvc_vio_exit);
561
562/* the device tree order defines our numbering */
563static int hvc_find_vtys(void)
564{
565 struct device_node *vty;
566 int num_found = 0;
567
568 for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
569 vty = of_find_node_by_name(vty, "vty")) {
570 uint32_t *vtermno;
571
572 /* We have statically defined space for only a certain number
573 * of console adapters.
574 */
575 if ((num_found >= MAX_NR_HVC_CONSOLES) ||
576 (num_found >= VTTY_PORTS))
577 break;
578
579 vtermno = (uint32_t *)get_property(vty, "reg", NULL);
580 if (!vtermno)
581 continue;
582
583 if (!device_is_compatible(vty, "IBM,iSeries-vty"))
584 continue;
585
586 if (num_found == 0)
587 add_preferred_console("hvc", 0, NULL);
588 hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
589 ++num_found;
590 }
591
592 return num_found;
593}
594console_initcall(hvc_find_vtys);
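put_chars() above never lets more than VIOCHAR_WINDOW events be outstanding: each send bumps pi->seq, and hvc_ack_event() copies the acknowledged correlation token into pi->ack. A stripped-down sketch of that credit-window bookkeeping (illustrative only, not taken from the driver):

/* Illustrative credit-window bookkeeping, mirroring pi->seq/pi->ack. */
#define WINDOW	10

struct credit {
	unsigned long long seq;		/* last sequence number sent     */
	unsigned long long ack;		/* last sequence number acked    */
};

static int can_send(const struct credit *c)
{
	return (c->seq - c->ack) < WINDOW;	/* room in the window?   */
}

static unsigned long long send_one(struct credit *c)
{
	return c->seq++;		/* token carried by the event    */
}

static void got_ack(struct credit *c, unsigned long long token)
{
	c->ack = token;			/* what the ack handler records  */
}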
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index 57106e02fd2e..4b97eaf18602 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -94,7 +94,7 @@ static int hvc_rtas_init(void)
94 94
95 /* Allocate an hvc_struct for the console device we instantiated 95 /* Allocate an hvc_struct for the console device we instantiated
96 * earlier. Save off hp so that we can return it on exit */ 96 * earlier. Save off hp so that we can return it on exit */
97 hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops); 97 hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops, 16);
98 if (IS_ERR(hp)) 98 if (IS_ERR(hp))
99 return PTR_ERR(hp); 99 return PTR_ERR(hp);
100 100
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 9add81ceb440..cc95941148fb 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -90,7 +90,8 @@ static int __devinit hvc_vio_probe(struct vio_dev *vdev,
90 if (!vdev || !id) 90 if (!vdev || !id)
91 return -EPERM; 91 return -EPERM;
92 92
93 hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops); 93 hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
94 MAX_VIO_PUT_CHARS);
94 if (IS_ERR(hp)) 95 if (IS_ERR(hp))
95 return PTR_ERR(hp); 96 return PTR_ERR(hp);
96 dev_set_drvdata(&vdev->dev, hp); 97 dev_set_drvdata(&vdev->dev, hp);
@@ -140,7 +141,7 @@ static int hvc_find_vtys(void)
140 141
141 for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL; 142 for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
142 vty = of_find_node_by_name(vty, "vty")) { 143 vty = of_find_node_by_name(vty, "vty")) {
143 uint32_t *vtermno; 144 const uint32_t *vtermno;
144 145
145 /* We have statically defined space for only a certain number 146 /* We have statically defined space for only a certain number
146 * of console adapters. 147 * of console adapters.
@@ -148,7 +149,7 @@ static int hvc_find_vtys(void)
148 if (num_found >= MAX_NR_HVC_CONSOLES) 149 if (num_found >= MAX_NR_HVC_CONSOLES)
149 break; 150 break;
150 151
151 vtermno = (uint32_t *)get_property(vty, "reg", NULL); 152 vtermno = get_property(vty, "reg", NULL);
152 if (!vtermno) 153 if (!vtermno)
153 continue; 154 continue;
154 155
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 017f755632a3..a89a95fb5e40 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -1274,11 +1274,10 @@ static int __init hvsi_console_init(void)
1274 vty != NULL; 1274 vty != NULL;
1275 vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) { 1275 vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
1276 struct hvsi_struct *hp; 1276 struct hvsi_struct *hp;
1277 uint32_t *vtermno; 1277 const uint32_t *vtermno, *irq;
1278 uint32_t *irq;
1279 1278
1280 vtermno = (uint32_t *)get_property(vty, "reg", NULL); 1279 vtermno = get_property(vty, "reg", NULL);
1281 irq = (uint32_t *)get_property(vty, "interrupts", NULL); 1280 irq = get_property(vty, "interrupts", NULL);
1282 if (!vtermno || !irq) 1281 if (!vtermno || !irq)
1283 continue; 1282 continue;
1284 1283
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 84dfc4278139..8c09997cc3d6 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -3488,7 +3488,7 @@ static int stli_initecp(stlibrd_t *brdp)
3488 */ 3488 */
3489 EBRDENABLE(brdp); 3489 EBRDENABLE(brdp);
3490 sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR); 3490 sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
3491 memcpy(&sig, sigsp, sizeof(cdkecpsig_t)); 3491 memcpy_fromio(&sig, sigsp, sizeof(cdkecpsig_t));
3492 EBRDDISABLE(brdp); 3492 EBRDDISABLE(brdp);
3493 3493
3494 if (sig.magic != cpu_to_le32(ECP_MAGIC)) 3494 if (sig.magic != cpu_to_le32(ECP_MAGIC))
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
index 2e68eeb8a2cd..aefd683c60b7 100644
--- a/drivers/char/tpm/tpm_atmel.h
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -37,7 +37,7 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
37{ 37{
38 struct device_node *dn; 38 struct device_node *dn;
39 unsigned long address, size; 39 unsigned long address, size;
40 unsigned int *reg; 40 const unsigned int *reg;
41 int reglen; 41 int reglen;
42 int naddrc; 42 int naddrc;
43 int nsizec; 43 int nsizec;
@@ -52,7 +52,7 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
52 return NULL; 52 return NULL;
53 } 53 }
54 54
55 reg = (unsigned int *) get_property(dn, "reg", &reglen); 55 reg = get_property(dn, "reg", &reglen);
56 naddrc = prom_n_addr_cells(dn); 56 naddrc = prom_n_addr_cells(dn);
57 nsizec = prom_n_size_cells(dn); 57 nsizec = prom_n_size_cells(dn);
58 58
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 766f7864c6c6..f3efeaf2826e 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -43,7 +43,6 @@
43#include <linux/sysrq.h> 43#include <linux/sysrq.h>
44 44
45#include <asm/iseries/vio.h> 45#include <asm/iseries/vio.h>
46
47#include <asm/iseries/hv_lp_event.h> 46#include <asm/iseries/hv_lp_event.h>
48#include <asm/iseries/hv_call_event.h> 47#include <asm/iseries/hv_call_event.h>
49#include <asm/iseries/hv_lp_config.h> 48#include <asm/iseries/hv_lp_config.h>
@@ -67,35 +66,6 @@ static int vio_sysrq_pressed;
67extern int sysrq_enabled; 66extern int sysrq_enabled;
68#endif 67#endif
69 68
70/*
71 * The structure of the events that flow between us and OS/400. You can't
72 * mess with this unless the OS/400 side changes too
73 */
74struct viocharlpevent {
75 struct HvLpEvent event;
76 u32 reserved;
77 u16 version;
78 u16 subtype_result_code;
79 u8 virtual_device;
80 u8 len;
81 u8 data[VIOCHAR_MAX_DATA];
82};
83
84#define VIOCHAR_WINDOW 10
85#define VIOCHAR_HIGHWATERMARK 3
86
87enum viocharsubtype {
88 viocharopen = 0x0001,
89 viocharclose = 0x0002,
90 viochardata = 0x0003,
91 viocharack = 0x0004,
92 viocharconfig = 0x0005
93};
94
95enum viochar_rc {
96 viochar_rc_ebusy = 1
97};
98
99#define VIOCHAR_NUM_BUF 16 69#define VIOCHAR_NUM_BUF 16
100 70
101/* 71/*
@@ -1183,6 +1153,7 @@ static int __init viocons_init(void)
1183 port_info[i].magic = VIOTTY_MAGIC; 1153 port_info[i].magic = VIOTTY_MAGIC;
1184 } 1154 }
1185 HvCall_setLogBufferFormatAndCodepage(HvCall_LogBuffer_ASCII, 437); 1155 HvCall_setLogBufferFormatAndCodepage(HvCall_LogBuffer_ASCII, 437);
1156 add_preferred_console("viocons", 0, NULL);
1186 register_console(&viocons_early); 1157 register_console(&viocons_early);
1187 return 0; 1158 return 0;
1188} 1159}
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index b72b2049aaae..73c78bf75d7f 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -940,7 +940,6 @@ static void vioHandleTapeEvent(struct HvLpEvent *event)
940 940
941static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id) 941static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
942{ 942{
943 char tapename[32];
944 int i = vdev->unit_address; 943 int i = vdev->unit_address;
945 int j; 944 int j;
946 945
@@ -956,10 +955,9 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
956 "iseries!vt%d", i); 955 "iseries!vt%d", i);
957 class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80), 956 class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80),
958 NULL, "iseries!nvt%d", i); 957 NULL, "iseries!nvt%d", i);
959 sprintf(tapename, "iseries/vt%d", i); 958 printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
960 printk(VIOTAPE_KERN_INFO "tape %s is iSeries "
961 "resource %10.10s type %4.4s, model %3.3s\n", 959 "resource %10.10s type %4.4s, model %3.3s\n",
962 tapename, viotape_unitinfo[i].rsrcname, 960 i, viotape_unitinfo[i].rsrcname,
963 viotape_unitinfo[i].type, viotape_unitinfo[i].model); 961 viotape_unitinfo[i].type, viotape_unitinfo[i].model);
964 return 0; 962 return 0;
965} 963}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3df613ae4ec..d35a9f06ab7b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -32,7 +32,7 @@
32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg) 32#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
33 33
34/** 34/**
35 * The "cpufreq driver" - the arch- or hardware-dependend low 35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock 36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array. 37 * also protects the cpufreq_cpu_data array.
38 */ 38 */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 52cf1f021825..bf8aa45d4f01 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -55,6 +55,10 @@ struct cpu_dbs_info_s {
55 struct cpufreq_policy *cur_policy; 55 struct cpufreq_policy *cur_policy;
56 struct work_struct work; 56 struct work_struct work;
57 unsigned int enable; 57 unsigned int enable;
58 struct cpufreq_frequency_table *freq_table;
59 unsigned int freq_lo;
60 unsigned int freq_lo_jiffies;
61 unsigned int freq_hi_jiffies;
58}; 62};
59static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 63static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
60 64
@@ -72,15 +76,15 @@ static DEFINE_MUTEX(dbs_mutex);
72 76
73static struct workqueue_struct *kondemand_wq; 77static struct workqueue_struct *kondemand_wq;
74 78
75struct dbs_tuners { 79static struct dbs_tuners {
76 unsigned int sampling_rate; 80 unsigned int sampling_rate;
77 unsigned int up_threshold; 81 unsigned int up_threshold;
78 unsigned int ignore_nice; 82 unsigned int ignore_nice;
79}; 83 unsigned int powersave_bias;
80 84} dbs_tuners_ins = {
81static struct dbs_tuners dbs_tuners_ins = {
82 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 85 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
83 .ignore_nice = 0, 86 .ignore_nice = 0,
87 .powersave_bias = 0,
84}; 88};
85 89
86static inline cputime64_t get_cpu_idle_time(unsigned int cpu) 90static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
@@ -96,6 +100,70 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
96 return retval; 100 return retval;
97} 101}
98 102
103/*
104 * Find right freq to be set now with powersave_bias on.
105 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
106 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
107 */
108static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
109 unsigned int freq_next,
110 unsigned int relation)
111{
112 unsigned int freq_req, freq_reduc, freq_avg;
113 unsigned int freq_hi, freq_lo;
114 unsigned int index = 0;
115 unsigned int jiffies_total, jiffies_hi, jiffies_lo;
116 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
117
118 if (!dbs_info->freq_table) {
119 dbs_info->freq_lo = 0;
120 dbs_info->freq_lo_jiffies = 0;
121 return freq_next;
122 }
123
124 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
125 relation, &index);
126 freq_req = dbs_info->freq_table[index].frequency;
127 freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
128 freq_avg = freq_req - freq_reduc;
129
130 /* Find freq bounds for freq_avg in freq_table */
131 index = 0;
132 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
133 CPUFREQ_RELATION_H, &index);
134 freq_lo = dbs_info->freq_table[index].frequency;
135 index = 0;
136 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
137 CPUFREQ_RELATION_L, &index);
138 freq_hi = dbs_info->freq_table[index].frequency;
139
140 /* Find out how long we have to be in hi and lo freqs */
141 if (freq_hi == freq_lo) {
142 dbs_info->freq_lo = 0;
143 dbs_info->freq_lo_jiffies = 0;
144 return freq_lo;
145 }
146 jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
147 jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
148 jiffies_hi += ((freq_hi - freq_lo) / 2);
149 jiffies_hi /= (freq_hi - freq_lo);
150 jiffies_lo = jiffies_total - jiffies_hi;
151 dbs_info->freq_lo = freq_lo;
152 dbs_info->freq_lo_jiffies = jiffies_lo;
153 dbs_info->freq_hi_jiffies = jiffies_hi;
154 return freq_hi;
155}
156
157static void ondemand_powersave_bias_init(void)
158{
159 int i;
160 for_each_online_cpu(i) {
161 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
162 dbs_info->freq_table = cpufreq_frequency_get_table(i);
163 dbs_info->freq_lo = 0;
164 }
165}
166
99/************************** sysfs interface ************************/ 167/************************** sysfs interface ************************/
100static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 168static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
101{ 169{
@@ -124,6 +192,7 @@ static ssize_t show_##file_name \
124show_one(sampling_rate, sampling_rate); 192show_one(sampling_rate, sampling_rate);
125show_one(up_threshold, up_threshold); 193show_one(up_threshold, up_threshold);
126show_one(ignore_nice_load, ignore_nice); 194show_one(ignore_nice_load, ignore_nice);
195show_one(powersave_bias, powersave_bias);
127 196
128static ssize_t store_sampling_rate(struct cpufreq_policy *unused, 197static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
129 const char *buf, size_t count) 198 const char *buf, size_t count)
@@ -198,6 +267,27 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
198 return count; 267 return count;
199} 268}
200 269
270static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
271 const char *buf, size_t count)
272{
273 unsigned int input;
274 int ret;
275 ret = sscanf(buf, "%u", &input);
276
277 if (ret != 1)
278 return -EINVAL;
279
280 if (input > 1000)
281 input = 1000;
282
283 mutex_lock(&dbs_mutex);
284 dbs_tuners_ins.powersave_bias = input;
285 ondemand_powersave_bias_init();
286 mutex_unlock(&dbs_mutex);
287
288 return count;
289}
290
201#define define_one_rw(_name) \ 291#define define_one_rw(_name) \
202static struct freq_attr _name = \ 292static struct freq_attr _name = \
203__ATTR(_name, 0644, show_##_name, store_##_name) 293__ATTR(_name, 0644, show_##_name, store_##_name)
@@ -205,6 +295,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
205define_one_rw(sampling_rate); 295define_one_rw(sampling_rate);
206define_one_rw(up_threshold); 296define_one_rw(up_threshold);
207define_one_rw(ignore_nice_load); 297define_one_rw(ignore_nice_load);
298define_one_rw(powersave_bias);
208 299
209static struct attribute * dbs_attributes[] = { 300static struct attribute * dbs_attributes[] = {
210 &sampling_rate_max.attr, 301 &sampling_rate_max.attr,
@@ -212,6 +303,7 @@ static struct attribute * dbs_attributes[] = {
212 &sampling_rate.attr, 303 &sampling_rate.attr,
213 &up_threshold.attr, 304 &up_threshold.attr,
214 &ignore_nice_load.attr, 305 &ignore_nice_load.attr,
306 &powersave_bias.attr,
215 NULL 307 NULL
216}; 308};
217 309
@@ -234,6 +326,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
234 if (!this_dbs_info->enable) 326 if (!this_dbs_info->enable)
235 return; 327 return;
236 328
329 this_dbs_info->freq_lo = 0;
237 policy = this_dbs_info->cur_policy; 330 policy = this_dbs_info->cur_policy;
238 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); 331 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
239 total_ticks = (unsigned int) cputime64_sub(cur_jiffies, 332 total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
@@ -274,11 +367,18 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
274 /* Check for frequency increase */ 367 /* Check for frequency increase */
275 if (load > dbs_tuners_ins.up_threshold) { 368 if (load > dbs_tuners_ins.up_threshold) {
276 /* if we are already at full speed then break out early */ 369 /* if we are already at full speed then break out early */
277 if (policy->cur == policy->max) 370 if (!dbs_tuners_ins.powersave_bias) {
278 return; 371 if (policy->cur == policy->max)
279 372 return;
280 __cpufreq_driver_target(policy, policy->max, 373
281 CPUFREQ_RELATION_H); 374 __cpufreq_driver_target(policy, policy->max,
375 CPUFREQ_RELATION_H);
376 } else {
377 int freq = powersave_bias_target(policy, policy->max,
378 CPUFREQ_RELATION_H);
379 __cpufreq_driver_target(policy, freq,
380 CPUFREQ_RELATION_L);
381 }
282 return; 382 return;
283 } 383 }
284 384
@@ -293,37 +393,64 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
293 * policy. To be safe, we focus 10 points under the threshold. 393 * policy. To be safe, we focus 10 points under the threshold.
294 */ 394 */
295 if (load < (dbs_tuners_ins.up_threshold - 10)) { 395 if (load < (dbs_tuners_ins.up_threshold - 10)) {
296 unsigned int freq_next; 396 unsigned int freq_next = (policy->cur * load) /
297 freq_next = (policy->cur * load) /
298 (dbs_tuners_ins.up_threshold - 10); 397 (dbs_tuners_ins.up_threshold - 10);
299 398 if (!dbs_tuners_ins.powersave_bias) {
300 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); 399 __cpufreq_driver_target(policy, freq_next,
400 CPUFREQ_RELATION_L);
401 } else {
402 int freq = powersave_bias_target(policy, freq_next,
403 CPUFREQ_RELATION_L);
404 __cpufreq_driver_target(policy, freq,
405 CPUFREQ_RELATION_L);
406 }
301 } 407 }
302} 408}
303 409
410/* Sampling types */
411enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
412
304static void do_dbs_timer(void *data) 413static void do_dbs_timer(void *data)
305{ 414{
306 unsigned int cpu = smp_processor_id(); 415 unsigned int cpu = smp_processor_id();
307 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 416 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
417 /* We want all CPUs to do sampling nearly on same jiffy */
418 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
419 delay -= jiffies % delay;
308 420
309 if (!dbs_info->enable) 421 if (!dbs_info->enable)
310 return; 422 return;
311 423 /* Common NORMAL_SAMPLE setup */
312 lock_cpu_hotplug(); 424 INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
313 dbs_check_cpu(dbs_info); 425 if (!dbs_tuners_ins.powersave_bias ||
314 unlock_cpu_hotplug(); 426 (unsigned long) data == DBS_NORMAL_SAMPLE) {
315 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 427 lock_cpu_hotplug();
316 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 428 dbs_check_cpu(dbs_info);
429 unlock_cpu_hotplug();
430 if (dbs_info->freq_lo) {
431 /* Setup timer for SUB_SAMPLE */
432 INIT_WORK(&dbs_info->work, do_dbs_timer,
433 (void *)DBS_SUB_SAMPLE);
434 delay = dbs_info->freq_hi_jiffies;
435 }
436 } else {
437 __cpufreq_driver_target(dbs_info->cur_policy,
438 dbs_info->freq_lo,
439 CPUFREQ_RELATION_H);
440 }
441 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
317} 442}
318 443
319static inline void dbs_timer_init(unsigned int cpu) 444static inline void dbs_timer_init(unsigned int cpu)
320{ 445{
321 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 446 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
447 /* We want all CPUs to do sampling nearly on same jiffy */
448 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
449 delay -= jiffies % delay;
322 450
323 INIT_WORK(&dbs_info->work, do_dbs_timer, 0); 451 ondemand_powersave_bias_init();
324 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, 452 INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
325 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 453 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
326 return;
327} 454}
328 455
329static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 456static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
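The powersave_bias logic added above trades a requested frequency for a pair of neighbouring table frequencies plus a duty cycle, so the time-weighted average lands bias/1000 below the request. A stand-alone worked example with assumed numbers (a 1.6/2.0 GHz table, a 10-jiffy sampling period, powersave_bias = 100):

/* Worked example of the powersave_bias_target() split above; all
 * frequencies and the jiffy count below are assumed values. */
#include <stdio.h>

int main(void)
{
	unsigned int freq_req = 2000000;	/* requested, kHz            */
	unsigned int bias = 100;		/* powersave_bias (10.0%)    */
	unsigned int freq_lo = 1600000;		/* table entry below average */
	unsigned int freq_hi = 2000000;		/* table entry above average */
	unsigned int jiffies_total = 10;	/* sampling period, jiffies  */

	unsigned int freq_avg = freq_req - freq_req * bias / 1000;
	unsigned int jiffies_hi = ((freq_avg - freq_lo) * jiffies_total +
				   (freq_hi - freq_lo) / 2) /
				  (freq_hi - freq_lo);
	unsigned int jiffies_lo = jiffies_total - jiffies_hi;

	/* Prints: 1800000 kHz target -> 5 jiffies @ 2000000, 5 @ 1600000 */
	printf("%u kHz target -> %u jiffies @ %u, %u @ %u\n",
	       freq_avg, jiffies_hi, freq_hi, jiffies_lo, freq_lo);
	return 0;
}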
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 25eee5394201..c2ecc599dc5f 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,12 +350,10 @@ __init cpufreq_stats_init(void)
350 } 350 }
351 351
352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 352 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
353 lock_cpu_hotplug();
354 for_each_online_cpu(cpu) { 353 for_each_online_cpu(cpu) {
355 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE, 354 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
356 (void *)(long)cpu); 355 (void *)(long)cpu);
357 } 356 }
358 unlock_cpu_hotplug();
359 return 0; 357 return 0;
360} 358}
361static void 359static void
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4263935443cc..adb554153f67 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,22 +2,53 @@ menu "Hardware crypto devices"
2 2
3config CRYPTO_DEV_PADLOCK 3config CRYPTO_DEV_PADLOCK
4 tristate "Support for VIA PadLock ACE" 4 tristate "Support for VIA PadLock ACE"
5 depends on CRYPTO && X86_32 5 depends on X86_32
6 select CRYPTO_ALGAPI
7 default m
6 help 8 help
7 Some VIA processors come with an integrated crypto engine 9 Some VIA processors come with an integrated crypto engine
8 (so called VIA PadLock ACE, Advanced Cryptography Engine) 10 (so called VIA PadLock ACE, Advanced Cryptography Engine)
9 that provides instructions for very fast {en,de}cryption 11 that provides instructions for very fast cryptographic
10 with some algorithms. 12 operations with supported algorithms.
11 13
12 The instructions are used only when the CPU supports them. 14 The instructions are used only when the CPU supports them.
13 Otherwise software encryption is used. If you are unsure, 15 Otherwise software encryption is used.
14 say Y. 16
17 Selecting M for this option will compile a helper module
18 padlock.ko that should autoload all below configured
19 algorithms. Don't worry if your hardware does not support
20 some or all of them. In such case padlock.ko will
21 simply write a single line into the kernel log informing
22 about its failure but everything will keep working fine.
23
24 If you are unsure, say M. The compiled module will be
25 called padlock.ko
15 26
16config CRYPTO_DEV_PADLOCK_AES 27config CRYPTO_DEV_PADLOCK_AES
17 bool "Support for AES in VIA PadLock" 28 tristate "PadLock driver for AES algorithm"
18 depends on CRYPTO_DEV_PADLOCK 29 depends on CRYPTO_DEV_PADLOCK
19 default y 30 select CRYPTO_BLKCIPHER
31 default m
20 help 32 help
21 Use VIA PadLock for AES algorithm. 33 Use VIA PadLock for AES algorithm.
22 34
35 Available in VIA C3 and newer CPUs.
36
37 If unsure say M. The compiled module will be
38 called padlock-aes.ko
39
40config CRYPTO_DEV_PADLOCK_SHA
41 tristate "PadLock driver for SHA1 and SHA256 algorithms"
42 depends on CRYPTO_DEV_PADLOCK
43 select CRYPTO_SHA1
44 select CRYPTO_SHA256
45 default m
46 help
47 Use VIA PadLock for SHA1/SHA256 algorithms.
48
49 Available in VIA C7 and newer processors.
50
51 If unsure say M. The compiled module will be
52 called padlock-sha.ko
53
23endmenu 54endmenu
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 45426ca19a23..4c3d0ec1cf80 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,7 +1,3 @@
1
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o 1obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
3 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
4padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o 3obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
5
6padlock-objs := padlock-generic.o $(padlock-objs-y)
7
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b643d71298a9..d4501dc7e650 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -43,11 +43,11 @@
43 * --------------------------------------------------------------------------- 43 * ---------------------------------------------------------------------------
44 */ 44 */
45 45
46#include <crypto/algapi.h>
46#include <linux/module.h> 47#include <linux/module.h>
47#include <linux/init.h> 48#include <linux/init.h>
48#include <linux/types.h> 49#include <linux/types.h>
49#include <linux/errno.h> 50#include <linux/errno.h>
50#include <linux/crypto.h>
51#include <linux/interrupt.h> 51#include <linux/interrupt.h>
52#include <linux/kernel.h> 52#include <linux/kernel.h>
53#include <asm/byteorder.h> 53#include <asm/byteorder.h>
@@ -59,6 +59,17 @@
59#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ 59#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) 60#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
61 61
62/* Control word. */
63struct cword {
64 unsigned int __attribute__ ((__packed__))
65 rounds:4,
66 algo:3,
67 keygen:1,
68 interm:1,
69 encdec:1,
70 ksize:2;
71} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
72
62/* Whenever making any changes to the following 73/* Whenever making any changes to the following
63 * structure *make sure* you keep E, d_data 74 * structure *make sure* you keep E, d_data
64 * and cword aligned on 16 Bytes boundaries!!! */ 75 * and cword aligned on 16 Bytes boundaries!!! */
@@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
286 return 0; 297 return 0;
287} 298}
288 299
289static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) 300static inline struct aes_ctx *aes_ctx_common(void *ctx)
290{ 301{
291 unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); 302 unsigned long addr = (unsigned long)ctx;
292 unsigned long align = PADLOCK_ALIGNMENT; 303 unsigned long align = PADLOCK_ALIGNMENT;
293 304
294 if (align <= crypto_tfm_ctx_alignment()) 305 if (align <= crypto_tfm_ctx_alignment())
@@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
296 return (struct aes_ctx *)ALIGN(addr, align); 307 return (struct aes_ctx *)ALIGN(addr, align);
297} 308}
298 309
310static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
311{
312 return aes_ctx_common(crypto_tfm_ctx(tfm));
313}
314
315static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
316{
317 return aes_ctx_common(crypto_blkcipher_ctx(tfm));
318}
319
299static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, 320static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
300 unsigned int key_len, u32 *flags) 321 unsigned int key_len)
301{ 322{
302 struct aes_ctx *ctx = aes_ctx(tfm); 323 struct aes_ctx *ctx = aes_ctx(tfm);
303 const __le32 *key = (const __le32 *)in_key; 324 const __le32 *key = (const __le32 *)in_key;
325 u32 *flags = &tfm->crt_flags;
304 uint32_t i, t, u, v, w; 326 uint32_t i, t, u, v, w;
305 uint32_t P[AES_EXTENDED_KEY_SIZE]; 327 uint32_t P[AES_EXTENDED_KEY_SIZE];
306 uint32_t rounds; 328 uint32_t rounds;
307 329
308 if (key_len != 16 && key_len != 24 && key_len != 32) { 330 if (key_len % 8) {
309 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 331 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
310 return -EINVAL; 332 return -EINVAL;
311 } 333 }
@@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
430 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); 452 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
431} 453}
432 454
433static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, 455static struct crypto_alg aes_alg = {
434 const u8 *in, unsigned int nbytes) 456 .cra_name = "aes",
457 .cra_driver_name = "aes-padlock",
458 .cra_priority = PADLOCK_CRA_PRIORITY,
459 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
460 .cra_blocksize = AES_BLOCK_SIZE,
461 .cra_ctxsize = sizeof(struct aes_ctx),
462 .cra_alignmask = PADLOCK_ALIGNMENT - 1,
463 .cra_module = THIS_MODULE,
464 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
465 .cra_u = {
466 .cipher = {
467 .cia_min_keysize = AES_MIN_KEY_SIZE,
468 .cia_max_keysize = AES_MAX_KEY_SIZE,
469 .cia_setkey = aes_set_key,
470 .cia_encrypt = aes_encrypt,
471 .cia_decrypt = aes_decrypt,
472 }
473 }
474};
475
476static int ecb_aes_encrypt(struct blkcipher_desc *desc,
477 struct scatterlist *dst, struct scatterlist *src,
478 unsigned int nbytes)
435{ 479{
436 struct aes_ctx *ctx = aes_ctx(desc->tfm); 480 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
437 padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 481 struct blkcipher_walk walk;
438 nbytes / AES_BLOCK_SIZE); 482 int err;
439 return nbytes & ~(AES_BLOCK_SIZE - 1); 483
484 blkcipher_walk_init(&walk, dst, src, nbytes);
485 err = blkcipher_walk_virt(desc, &walk);
486
487 while ((nbytes = walk.nbytes)) {
488 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
489 ctx->E, &ctx->cword.encrypt,
490 nbytes / AES_BLOCK_SIZE);
491 nbytes &= AES_BLOCK_SIZE - 1;
492 err = blkcipher_walk_done(desc, &walk, nbytes);
493 }
494
495 return err;
440} 496}
441 497
442static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, 498static int ecb_aes_decrypt(struct blkcipher_desc *desc,
443 const u8 *in, unsigned int nbytes) 499 struct scatterlist *dst, struct scatterlist *src,
500 unsigned int nbytes)
444{ 501{
445 struct aes_ctx *ctx = aes_ctx(desc->tfm); 502 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
446 padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 503 struct blkcipher_walk walk;
447 nbytes / AES_BLOCK_SIZE); 504 int err;
448 return nbytes & ~(AES_BLOCK_SIZE - 1); 505
506 blkcipher_walk_init(&walk, dst, src, nbytes);
507 err = blkcipher_walk_virt(desc, &walk);
508
509 while ((nbytes = walk.nbytes)) {
510 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
511 ctx->D, &ctx->cword.decrypt,
512 nbytes / AES_BLOCK_SIZE);
513 nbytes &= AES_BLOCK_SIZE - 1;
514 err = blkcipher_walk_done(desc, &walk, nbytes);
515 }
516
517 return err;
449} 518}
450 519
451static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, 520static struct crypto_alg ecb_aes_alg = {
452 const u8 *in, unsigned int nbytes) 521 .cra_name = "ecb(aes)",
453{ 522 .cra_driver_name = "ecb-aes-padlock",
454 struct aes_ctx *ctx = aes_ctx(desc->tfm); 523 .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
455 u8 *iv; 524 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
525 .cra_blocksize = AES_BLOCK_SIZE,
526 .cra_ctxsize = sizeof(struct aes_ctx),
527 .cra_alignmask = PADLOCK_ALIGNMENT - 1,
528 .cra_type = &crypto_blkcipher_type,
529 .cra_module = THIS_MODULE,
530 .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
531 .cra_u = {
532 .blkcipher = {
533 .min_keysize = AES_MIN_KEY_SIZE,
534 .max_keysize = AES_MAX_KEY_SIZE,
535 .setkey = aes_set_key,
536 .encrypt = ecb_aes_encrypt,
537 .decrypt = ecb_aes_decrypt,
538 }
539 }
540};
456 541
457 iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, 542static int cbc_aes_encrypt(struct blkcipher_desc *desc,
458 &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE); 543 struct scatterlist *dst, struct scatterlist *src,
459 memcpy(desc->info, iv, AES_BLOCK_SIZE); 544 unsigned int nbytes)
545{
546 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
547 struct blkcipher_walk walk;
548 int err;
549
550 blkcipher_walk_init(&walk, dst, src, nbytes);
551 err = blkcipher_walk_virt(desc, &walk);
552
553 while ((nbytes = walk.nbytes)) {
554 u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
555 walk.dst.virt.addr, ctx->E,
556 walk.iv, &ctx->cword.encrypt,
557 nbytes / AES_BLOCK_SIZE);
558 memcpy(walk.iv, iv, AES_BLOCK_SIZE);
559 nbytes &= AES_BLOCK_SIZE - 1;
560 err = blkcipher_walk_done(desc, &walk, nbytes);
561 }
460 562
461 return nbytes & ~(AES_BLOCK_SIZE - 1); 563 return err;
462} 564}
463 565
464static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, 566static int cbc_aes_decrypt(struct blkcipher_desc *desc,
465 const u8 *in, unsigned int nbytes) 567 struct scatterlist *dst, struct scatterlist *src,
568 unsigned int nbytes)
466{ 569{
467 struct aes_ctx *ctx = aes_ctx(desc->tfm); 570 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
468 padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, 571 struct blkcipher_walk walk;
469 nbytes / AES_BLOCK_SIZE); 572 int err;
470 return nbytes & ~(AES_BLOCK_SIZE - 1); 573
574 blkcipher_walk_init(&walk, dst, src, nbytes);
575 err = blkcipher_walk_virt(desc, &walk);
576
577 while ((nbytes = walk.nbytes)) {
578 padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
579 ctx->D, walk.iv, &ctx->cword.decrypt,
580 nbytes / AES_BLOCK_SIZE);
581 nbytes &= AES_BLOCK_SIZE - 1;
582 err = blkcipher_walk_done(desc, &walk, nbytes);
583 }
584
585 return err;
471} 586}
472 587
473static struct crypto_alg aes_alg = { 588static struct crypto_alg cbc_aes_alg = {
474 .cra_name = "aes", 589 .cra_name = "cbc(aes)",
475 .cra_driver_name = "aes-padlock", 590 .cra_driver_name = "cbc-aes-padlock",
476 .cra_priority = 300, 591 .cra_priority = PADLOCK_COMPOSITE_PRIORITY,
477 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 592 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
478 .cra_blocksize = AES_BLOCK_SIZE, 593 .cra_blocksize = AES_BLOCK_SIZE,
479 .cra_ctxsize = sizeof(struct aes_ctx), 594 .cra_ctxsize = sizeof(struct aes_ctx),
480 .cra_alignmask = PADLOCK_ALIGNMENT - 1, 595 .cra_alignmask = PADLOCK_ALIGNMENT - 1,
596 .cra_type = &crypto_blkcipher_type,
481 .cra_module = THIS_MODULE, 597 .cra_module = THIS_MODULE,
482 .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), 598 .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
483 .cra_u = { 599 .cra_u = {
484 .cipher = { 600 .blkcipher = {
485 .cia_min_keysize = AES_MIN_KEY_SIZE, 601 .min_keysize = AES_MIN_KEY_SIZE,
486 .cia_max_keysize = AES_MAX_KEY_SIZE, 602 .max_keysize = AES_MAX_KEY_SIZE,
487 .cia_setkey = aes_set_key, 603 .ivsize = AES_BLOCK_SIZE,
488 .cia_encrypt = aes_encrypt, 604 .setkey = aes_set_key,
489 .cia_decrypt = aes_decrypt, 605 .encrypt = cbc_aes_encrypt,
490 .cia_encrypt_ecb = aes_encrypt_ecb, 606 .decrypt = cbc_aes_decrypt,
491 .cia_decrypt_ecb = aes_decrypt_ecb,
492 .cia_encrypt_cbc = aes_encrypt_cbc,
493 .cia_decrypt_cbc = aes_decrypt_cbc,
494 } 607 }
495 } 608 }
496}; 609};
497 610
498int __init padlock_init_aes(void) 611static int __init padlock_init(void)
499{ 612{
500 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); 613 int ret;
614
615 if (!cpu_has_xcrypt) {
616 printk(KERN_ERR PFX "VIA PadLock not detected.\n");
617 return -ENODEV;
618 }
619
620 if (!cpu_has_xcrypt_enabled) {
621 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
622 return -ENODEV;
623 }
501 624
502 gen_tabs(); 625 gen_tabs();
503 return crypto_register_alg(&aes_alg); 626 if ((ret = crypto_register_alg(&aes_alg)))
627 goto aes_err;
628
629 if ((ret = crypto_register_alg(&ecb_aes_alg)))
630 goto ecb_aes_err;
631
632 if ((ret = crypto_register_alg(&cbc_aes_alg)))
633 goto cbc_aes_err;
634
635 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
636
637out:
638 return ret;
639
640cbc_aes_err:
641 crypto_unregister_alg(&ecb_aes_alg);
642ecb_aes_err:
643 crypto_unregister_alg(&aes_alg);
644aes_err:
645 printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
646 goto out;
504} 647}
505 648
506void __exit padlock_fini_aes(void) 649static void __exit padlock_fini(void)
507{ 650{
651 crypto_unregister_alg(&cbc_aes_alg);
652 crypto_unregister_alg(&ecb_aes_alg);
508 crypto_unregister_alg(&aes_alg); 653 crypto_unregister_alg(&aes_alg);
509} 654}
655
656module_init(padlock_init);
657module_exit(padlock_fini);
658
659MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
660MODULE_LICENSE("GPL");
661MODULE_AUTHOR("Michal Ludvig");
662
663MODULE_ALIAS("aes-padlock");
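
Note: the new ecb/cbc handlers above follow the standard blkcipher walk pattern of this API generation: the scatterlists are walked chunk by chunk, each virtually mapped chunk is processed in whole blocks, and any sub-block tail is reported back to the walker. A minimal sketch of that loop, assuming a hypothetical my_ecb_encrypt_blocks() helper in place of padlock_xcrypt_ecb() and a locally defined block size:

	/*
	 * Sketch of the blkcipher walk loop used by ecb_aes_encrypt() above.
	 * my_ecb_encrypt_blocks() is a hypothetical helper standing in for
	 * padlock_xcrypt_ecb(); the surrounding flow is the generic pattern.
	 */
	#include <crypto/algapi.h>
	#include <linux/crypto.h>

	#define AES_BLOCK_SIZE	16	/* defined locally for self-containment */

	/* Hypothetical one-shot ECB helper, not defined in this sketch. */
	void my_ecb_encrypt_blocks(u8 *dst, const u8 *src, unsigned int nblocks);

	static int sketch_ecb_encrypt(struct blkcipher_desc *desc,
				      struct scatterlist *dst, struct scatterlist *src,
				      unsigned int nbytes)
	{
		struct blkcipher_walk walk;
		int err;

		blkcipher_walk_init(&walk, dst, src, nbytes);
		err = blkcipher_walk_virt(desc, &walk);		/* map the first chunk */

		while ((nbytes = walk.nbytes)) {
			/* Encrypt every whole block in this mapped chunk. */
			my_ecb_encrypt_blocks(walk.dst.virt.addr, walk.src.virt.addr,
					      nbytes / AES_BLOCK_SIZE);
			/* Hand any sub-block tail back; the walker advances the rest. */
			nbytes &= AES_BLOCK_SIZE - 1;
			err = blkcipher_walk_done(desc, &walk, nbytes);
		}

		return err;
	}
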
diff --git a/drivers/crypto/padlock-generic.c b/drivers/crypto/padlock-generic.c
deleted file mode 100644
index 18cf0e8274a7..000000000000
--- a/drivers/crypto/padlock-generic.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for VIA PadLock hardware crypto engine.
5 *
6 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/crypto.h>
19#include <asm/byteorder.h>
20#include "padlock.h"
21
22static int __init
23padlock_init(void)
24{
25 int ret = -ENOSYS;
26
27 if (!cpu_has_xcrypt) {
28 printk(KERN_ERR PFX "VIA PadLock not detected.\n");
29 return -ENODEV;
30 }
31
32 if (!cpu_has_xcrypt_enabled) {
33 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
34 return -ENODEV;
35 }
36
37#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
38 if ((ret = padlock_init_aes())) {
39 printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
40 return ret;
41 }
42#endif
43
44 if (ret == -ENOSYS)
45 printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
46
47 return ret;
48}
49
50static void __exit
51padlock_fini(void)
52{
53#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
54 padlock_fini_aes();
55#endif
56}
57
58module_init(padlock_init);
59module_exit(padlock_fini);
60
61MODULE_DESCRIPTION("VIA PadLock crypto engine support.");
62MODULE_LICENSE("Dual BSD/GPL");
63MODULE_AUTHOR("Michal Ludvig");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
new file mode 100644
index 000000000000..a781fd23b607
--- /dev/null
+++ b/drivers/crypto/padlock-sha.c
@@ -0,0 +1,318 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for VIA PadLock hardware crypto engine.
5 *
6 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <crypto/algapi.h>
16#include <linux/err.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/cryptohash.h>
21#include <linux/interrupt.h>
22#include <linux/kernel.h>
23#include <linux/scatterlist.h>
24#include "padlock.h"
25
26#define SHA1_DEFAULT_FALLBACK "sha1-generic"
27#define SHA1_DIGEST_SIZE 20
28#define SHA1_HMAC_BLOCK_SIZE 64
29
30#define SHA256_DEFAULT_FALLBACK "sha256-generic"
31#define SHA256_DIGEST_SIZE 32
32#define SHA256_HMAC_BLOCK_SIZE 64
33
34struct padlock_sha_ctx {
35 char *data;
36 size_t used;
37 int bypass;
38 void (*f_sha_padlock)(const char *in, char *out, int count);
39 struct hash_desc fallback;
40};
41
42static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
43{
44 return crypto_tfm_ctx(tfm);
45}
46
47/* We'll need aligned address on the stack */
48#define NEAREST_ALIGNED(ptr) \
49 ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
50
51static struct crypto_alg sha1_alg, sha256_alg;
52
53static void padlock_sha_bypass(struct crypto_tfm *tfm)
54{
55 if (ctx(tfm)->bypass)
56 return;
57
58 crypto_hash_init(&ctx(tfm)->fallback);
59 if (ctx(tfm)->data && ctx(tfm)->used) {
60 struct scatterlist sg;
61
62 sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
63 crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
64 }
65
66 ctx(tfm)->used = 0;
67 ctx(tfm)->bypass = 1;
68}
69
70static void padlock_sha_init(struct crypto_tfm *tfm)
71{
72 ctx(tfm)->used = 0;
73 ctx(tfm)->bypass = 0;
74}
75
76static void padlock_sha_update(struct crypto_tfm *tfm,
77 const uint8_t *data, unsigned int length)
78{
79 /* Our buffer is always one page. */
80 if (unlikely(!ctx(tfm)->bypass &&
81 (ctx(tfm)->used + length > PAGE_SIZE)))
82 padlock_sha_bypass(tfm);
83
84 if (unlikely(ctx(tfm)->bypass)) {
85 struct scatterlist sg;
86 sg_set_buf(&sg, (uint8_t *)data, length);
87 crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
88 return;
89 }
90
91 memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
92 ctx(tfm)->used += length;
93}
94
95static inline void padlock_output_block(uint32_t *src,
96 uint32_t *dst, size_t count)
97{
98 while (count--)
99 *dst++ = swab32(*src++);
100}
101
102static void padlock_do_sha1(const char *in, char *out, int count)
103{
104 /* We can't store directly to *out as it may be unaligned. */
105 /* BTW Don't reduce the buffer size below 128 Bytes!
106 * PadLock microcode needs it that big. */
107 char buf[128+16];
108 char *result = NEAREST_ALIGNED(buf);
109
110 ((uint32_t *)result)[0] = 0x67452301;
111 ((uint32_t *)result)[1] = 0xEFCDAB89;
112 ((uint32_t *)result)[2] = 0x98BADCFE;
113 ((uint32_t *)result)[3] = 0x10325476;
114 ((uint32_t *)result)[4] = 0xC3D2E1F0;
115
116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
117 : "+S"(in), "+D"(result)
118 : "c"(count), "a"(0));
119
120 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
121}
122
123static void padlock_do_sha256(const char *in, char *out, int count)
124{
125 /* We can't store directly to *out as it may be unaligned. */
126 /* BTW Don't reduce the buffer size below 128 Bytes!
127 * PadLock microcode needs it that big. */
128 char buf[128+16];
129 char *result = NEAREST_ALIGNED(buf);
130
131 ((uint32_t *)result)[0] = 0x6A09E667;
132 ((uint32_t *)result)[1] = 0xBB67AE85;
133 ((uint32_t *)result)[2] = 0x3C6EF372;
134 ((uint32_t *)result)[3] = 0xA54FF53A;
135 ((uint32_t *)result)[4] = 0x510E527F;
136 ((uint32_t *)result)[5] = 0x9B05688C;
137 ((uint32_t *)result)[6] = 0x1F83D9AB;
138 ((uint32_t *)result)[7] = 0x5BE0CD19;
139
140 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
141 : "+S"(in), "+D"(result)
142 : "c"(count), "a"(0));
143
144 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
145}
146
147static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
148{
149 if (unlikely(ctx(tfm)->bypass)) {
150 crypto_hash_final(&ctx(tfm)->fallback, out);
151 ctx(tfm)->bypass = 0;
152 return;
153 }
154
155 /* Pass the input buffer to PadLock microcode... */
156 ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
157
158 ctx(tfm)->used = 0;
159}
160
161static int padlock_cra_init(struct crypto_tfm *tfm)
162{
163 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
164 struct crypto_hash *fallback_tfm;
165
166 /* For now we'll allocate one page. This
167 * could eventually be configurable one day. */
168 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
169 if (!ctx(tfm)->data)
170 return -ENOMEM;
171
172 /* Allocate a fallback and abort if it failed. */
173 fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
174 CRYPTO_ALG_ASYNC |
175 CRYPTO_ALG_NEED_FALLBACK);
176 if (IS_ERR(fallback_tfm)) {
177 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
178 fallback_driver_name);
179 free_page((unsigned long)(ctx(tfm)->data));
180 return PTR_ERR(fallback_tfm);
181 }
182
183 ctx(tfm)->fallback.tfm = fallback_tfm;
184 return 0;
185}
186
187static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
188{
189 ctx(tfm)->f_sha_padlock = padlock_do_sha1;
190
191 return padlock_cra_init(tfm);
192}
193
194static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
195{
196 ctx(tfm)->f_sha_padlock = padlock_do_sha256;
197
198 return padlock_cra_init(tfm);
199}
200
201static void padlock_cra_exit(struct crypto_tfm *tfm)
202{
203 if (ctx(tfm)->data) {
204 free_page((unsigned long)(ctx(tfm)->data));
205 ctx(tfm)->data = NULL;
206 }
207
208 crypto_free_hash(ctx(tfm)->fallback.tfm);
209 ctx(tfm)->fallback.tfm = NULL;
210}
211
212static struct crypto_alg sha1_alg = {
213 .cra_name = "sha1",
214 .cra_driver_name = "sha1-padlock",
215 .cra_priority = PADLOCK_CRA_PRIORITY,
216 .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
217 CRYPTO_ALG_NEED_FALLBACK,
218 .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
219 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
220 .cra_module = THIS_MODULE,
221 .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
222 .cra_init = padlock_sha1_cra_init,
223 .cra_exit = padlock_cra_exit,
224 .cra_u = {
225 .digest = {
226 .dia_digestsize = SHA1_DIGEST_SIZE,
227 .dia_init = padlock_sha_init,
228 .dia_update = padlock_sha_update,
229 .dia_final = padlock_sha_final,
230 }
231 }
232};
233
234static struct crypto_alg sha256_alg = {
235 .cra_name = "sha256",
236 .cra_driver_name = "sha256-padlock",
237 .cra_priority = PADLOCK_CRA_PRIORITY,
238 .cra_flags = CRYPTO_ALG_TYPE_DIGEST |
239 CRYPTO_ALG_NEED_FALLBACK,
240 .cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
241 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
242 .cra_module = THIS_MODULE,
243 .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
244 .cra_init = padlock_sha256_cra_init,
245 .cra_exit = padlock_cra_exit,
246 .cra_u = {
247 .digest = {
248 .dia_digestsize = SHA256_DIGEST_SIZE,
249 .dia_init = padlock_sha_init,
250 .dia_update = padlock_sha_update,
251 .dia_final = padlock_sha_final,
252 }
253 }
254};
255
256static void __init padlock_sha_check_fallbacks(void)
257{
258 if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
259 CRYPTO_ALG_NEED_FALLBACK))
260 printk(KERN_WARNING PFX
261 "Couldn't load fallback module for sha1.\n");
262
263 if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
264 CRYPTO_ALG_NEED_FALLBACK))
265 printk(KERN_WARNING PFX
266 "Couldn't load fallback module for sha256.\n");
267}
268
269static int __init padlock_init(void)
270{
271 int rc = -ENODEV;
272
273 if (!cpu_has_phe) {
274 printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
275 return -ENODEV;
276 }
277
278 if (!cpu_has_phe_enabled) {
279 printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
280 return -ENODEV;
281 }
282
283 padlock_sha_check_fallbacks();
284
285 rc = crypto_register_alg(&sha1_alg);
286 if (rc)
287 goto out;
288
289 rc = crypto_register_alg(&sha256_alg);
290 if (rc)
291 goto out_unreg1;
292
293 printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
294
295 return 0;
296
297out_unreg1:
298 crypto_unregister_alg(&sha1_alg);
299out:
300 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
301 return rc;
302}
303
304static void __exit padlock_fini(void)
305{
306 crypto_unregister_alg(&sha1_alg);
307 crypto_unregister_alg(&sha256_alg);
308}
309
310module_init(padlock_init);
311module_exit(padlock_fini);
312
313MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
314MODULE_LICENSE("GPL");
315MODULE_AUTHOR("Michal Ludvig");
316
317MODULE_ALIAS("sha1-padlock");
318MODULE_ALIAS("sha256-padlock");
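
The bypass path above drives the synchronous hash interface (crypto_alloc_hash(), struct hash_desc, crypto_hash_update()/crypto_hash_final()). For orientation, a hedged sketch of a consumer hashing one flat buffer through the same interface, using crypto_hash_digest() to do init/update/final in a single call; the function name and buffer are placeholders:

	/*
	 * Sketch: hashing a flat buffer via the synchronous hash API used by
	 * the fallback path above.  "sha1" may resolve to sha1-padlock or to
	 * the generic implementation, depending on what is registered.
	 */
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int sketch_sha1(void *data, unsigned int len, u8 *digest)
	{
		struct crypto_hash *tfm;
		struct hash_desc desc;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;
		sg_set_buf(&sg, data, len);

		/* init + update + final in one call */
		err = crypto_hash_digest(&desc, &sg, len, digest);

		crypto_free_hash(tfm);
		return err;
	}
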
diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c
new file mode 100644
index 000000000000..d6d7dd5bb98c
--- /dev/null
+++ b/drivers/crypto/padlock.c
@@ -0,0 +1,58 @@
1/*
2 * Cryptographic API.
3 *
4 * Support for VIA PadLock hardware crypto engine.
5 *
6 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/crypto.h>
19#include <linux/cryptohash.h>
20#include <linux/interrupt.h>
21#include <linux/kernel.h>
22#include <linux/scatterlist.h>
23#include "padlock.h"
24
25static int __init padlock_init(void)
26{
27 int success = 0;
28
29 if (crypto_has_cipher("aes-padlock", 0, 0))
30 success++;
31
32 if (crypto_has_hash("sha1-padlock", 0, 0))
33 success++;
34
35 if (crypto_has_hash("sha256-padlock", 0, 0))
36 success++;
37
38 if (!success) {
39 printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
40 return -ENODEV;
41 }
42
43 printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
44
45 return 0;
46}
47
48static void __exit padlock_fini(void)
49{
50}
51
52module_init(padlock_init);
53module_exit(padlock_fini);
54
55MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
56MODULE_LICENSE("GPL");
57MODULE_AUTHOR("Michal Ludvig");
58
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index b78489bc298a..b728e4518bd1 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -15,22 +15,9 @@
15 15
16#define PADLOCK_ALIGNMENT 16 16#define PADLOCK_ALIGNMENT 16
17 17
18/* Control word. */
19struct cword {
20 unsigned int __attribute__ ((__packed__))
21 rounds:4,
22 algo:3,
23 keygen:1,
24 interm:1,
25 encdec:1,
26 ksize:2;
27} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
28
29#define PFX "padlock: " 18#define PFX "padlock: "
30 19
31#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES 20#define PADLOCK_CRA_PRIORITY 300
32int padlock_init_aes(void); 21#define PADLOCK_COMPOSITE_PRIORITY 400
33void padlock_fini_aes(void);
34#endif
35 22
36#endif /* _CRYPTO_PADLOCK_H */ 23#endif /* _CRYPTO_PADLOCK_H */
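
The two priority constants added here decide which implementation the crypto core selects when several register the same cra_name: the highest cra_priority wins, so the composite cbc-aes-padlock (400) is preferred over anything assembled from the plain 300-priority ciphers. A minimal consumer sketch, assuming placeholder key, IV and in-place data:

	/*
	 * Sketch: requesting "cbc(aes)" from the crypto core.  With the
	 * PadLock driver loaded, the 400-priority cbc-aes-padlock
	 * implementation is chosen over lower-priority alternatives.
	 */
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int sketch_cbc_aes(u8 *buf, unsigned int len,	/* multiple of 16 */
				  const u8 *key, unsigned int keylen, const u8 *iv)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_blkcipher_setkey(tfm, key, keylen);
		if (err)
			goto out;

		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

		desc.tfm = tfm;
		desc.flags = 0;
		sg_init_one(&sg, buf, len);	/* encrypt in place */

		err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
	out:
		crypto_free_blkcipher(tfm);
		return err;
	}
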
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 53bb43593863..d658d9107955 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -207,7 +207,8 @@ static int i2c_powermac_probe(struct device *dev)
207 struct pmac_i2c_bus *bus = dev->platform_data; 207 struct pmac_i2c_bus *bus = dev->platform_data;
208 struct device_node *parent = NULL; 208 struct device_node *parent = NULL;
209 struct i2c_adapter *adapter; 209 struct i2c_adapter *adapter;
210 char name[32], *basename; 210 char name[32];
211 const char *basename;
211 int rc; 212 int rc;
212 213
213 if (bus == NULL) 214 if (bus == NULL)
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index ebf961f1718d..996c694341bc 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1154,7 +1154,7 @@ static int
1154pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) 1154pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1155{ 1155{
1156 struct device_node *np = pmif->node; 1156 struct device_node *np = pmif->node;
1157 int *bidp; 1157 const int *bidp;
1158 1158
1159 pmif->cable_80 = 0; 1159 pmif->cable_80 = 0;
1160 pmif->broken_dma = pmif->broken_dma_warn = 0; 1160 pmif->broken_dma = pmif->broken_dma_warn = 0;
@@ -1176,14 +1176,14 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1176 pmif->broken_dma = 1; 1176 pmif->broken_dma = 1;
1177 } 1177 }
1178 1178
1179 bidp = (int *)get_property(np, "AAPL,bus-id", NULL); 1179 bidp = get_property(np, "AAPL,bus-id", NULL);
1180 pmif->aapl_bus_id = bidp ? *bidp : 0; 1180 pmif->aapl_bus_id = bidp ? *bidp : 0;
1181 1181
1182 /* Get cable type from device-tree */ 1182 /* Get cable type from device-tree */
1183 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6 1183 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
1184 || pmif->kind == controller_k2_ata6 1184 || pmif->kind == controller_k2_ata6
1185 || pmif->kind == controller_sh_ata6) { 1185 || pmif->kind == controller_sh_ata6) {
1186 char* cable = get_property(np, "cable-type", NULL); 1186 const char* cable = get_property(np, "cable-type", NULL);
1187 if (cable && !strncmp(cable, "80-", 3)) 1187 if (cable && !strncmp(cable, "80-", 3))
1188 pmif->cable_80 = 1; 1188 pmif->cable_80 = 1;
1189 } 1189 }
@@ -1326,7 +1326,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
1326 if (macio_irq_count(mdev) == 0) { 1326 if (macio_irq_count(mdev) == 0) {
1327 printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n", 1327 printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n",
1328 i, mdev->ofdev.node->full_name); 1328 i, mdev->ofdev.node->full_name);
1329 irq = 13; 1329 irq = irq_create_mapping(NULL, 13);
1330 } else 1330 } else
1331 irq = macio_irq(mdev, 0); 1331 irq = macio_irq(mdev, 0);
1332 1332
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 69a53d476b5b..9edfacee7d84 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -14,7 +14,7 @@ config INFINIBAND_USER_MAD
14 ---help--- 14 ---help---
15 Userspace InfiniBand Management Datagram (MAD) support. This 15 Userspace InfiniBand Management Datagram (MAD) support. This
16 is the kernel side of the userspace MAD support, which allows 16 is the kernel side of the userspace MAD support, which allows
17 userspace processes to send and receive MADs. You will also 17 userspace processes to send and receive MADs. You will also
18 need libibumad from <http://www.openib.org>. 18 need libibumad from <http://www.openib.org>.
19 19
20config INFINIBAND_USER_ACCESS 20config INFINIBAND_USER_ACCESS
@@ -36,6 +36,8 @@ config INFINIBAND_ADDR_TRANS
36 36
37source "drivers/infiniband/hw/mthca/Kconfig" 37source "drivers/infiniband/hw/mthca/Kconfig"
38source "drivers/infiniband/hw/ipath/Kconfig" 38source "drivers/infiniband/hw/ipath/Kconfig"
39source "drivers/infiniband/hw/ehca/Kconfig"
40source "drivers/infiniband/hw/amso1100/Kconfig"
39 41
40source "drivers/infiniband/ulp/ipoib/Kconfig" 42source "drivers/infiniband/ulp/ipoib/Kconfig"
41 43
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index c7ff58c1d0e5..2b5d1098ef45 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,6 +1,8 @@
1obj-$(CONFIG_INFINIBAND) += core/ 1obj-$(CONFIG_INFINIBAND) += core/
2obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/ 2obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
3obj-$(CONFIG_IPATH_CORE) += hw/ipath/ 3obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
4obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 6obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
5obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 7obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
6obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ 8obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 68e73ec2d1f8..163d991eb8c9 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,7 +1,7 @@
1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o 1infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
2 2
3obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 3obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
4 ib_cm.o $(infiniband-y) 4 ib_cm.o iw_cm.o $(infiniband-y)
5obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 5obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
6obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o 6obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
7 7
@@ -14,6 +14,8 @@ ib_sa-y := sa_query.o
14 14
15ib_cm-y := cm.o 15ib_cm-y := cm.o
16 16
17iw_cm-y := iwcm.o
18
17rdma_cm-y := cma.o 19rdma_cm-y := cma.o
18 20
19ib_addr-y := addr.o 21ib_addr-y := addr.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1205e8027829..9cbf09e2052f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,12 +61,15 @@ static LIST_HEAD(req_list);
61static DECLARE_WORK(work, process_req, NULL); 61static DECLARE_WORK(work, process_req, NULL);
62static struct workqueue_struct *addr_wq; 62static struct workqueue_struct *addr_wq;
63 63
64static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, 64int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
65 unsigned char *dst_dev_addr) 65 const unsigned char *dst_dev_addr)
66{ 66{
67 switch (dev->type) { 67 switch (dev->type) {
68 case ARPHRD_INFINIBAND: 68 case ARPHRD_INFINIBAND:
69 dev_addr->dev_type = IB_NODE_CA; 69 dev_addr->dev_type = RDMA_NODE_IB_CA;
70 break;
71 case ARPHRD_ETHER:
72 dev_addr->dev_type = RDMA_NODE_RNIC;
70 break; 73 break;
71 default: 74 default:
72 return -EADDRNOTAVAIL; 75 return -EADDRNOTAVAIL;
@@ -78,6 +81,7 @@ static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
78 memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); 81 memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
79 return 0; 82 return 0;
80} 83}
84EXPORT_SYMBOL(rdma_copy_addr);
81 85
82int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) 86int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
83{ 87{
@@ -89,7 +93,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
89 if (!dev) 93 if (!dev)
90 return -EADDRNOTAVAIL; 94 return -EADDRNOTAVAIL;
91 95
92 ret = copy_addr(dev_addr, dev, NULL); 96 ret = rdma_copy_addr(dev_addr, dev, NULL);
93 dev_put(dev); 97 dev_put(dev);
94 return ret; 98 return ret;
95} 99}
@@ -161,7 +165,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
161 165
162 /* If the device does ARP internally, return 'done' */ 166 /* If the device does ARP internally, return 'done' */
163 if (rt->idev->dev->flags & IFF_NOARP) { 167 if (rt->idev->dev->flags & IFF_NOARP) {
164 copy_addr(addr, rt->idev->dev, NULL); 168 rdma_copy_addr(addr, rt->idev->dev, NULL);
165 goto put; 169 goto put;
166 } 170 }
167 171
@@ -181,7 +185,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
181 src_in->sin_addr.s_addr = rt->rt_src; 185 src_in->sin_addr.s_addr = rt->rt_src;
182 } 186 }
183 187
184 ret = copy_addr(addr, neigh->dev, neigh->ha); 188 ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
185release: 189release:
186 neigh_release(neigh); 190 neigh_release(neigh);
187put: 191put:
@@ -245,7 +249,7 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
245 if (ZERONET(src_ip)) { 249 if (ZERONET(src_ip)) {
246 src_in->sin_family = dst_in->sin_family; 250 src_in->sin_family = dst_in->sin_family;
247 src_in->sin_addr.s_addr = dst_ip; 251 src_in->sin_addr.s_addr = dst_ip;
248 ret = copy_addr(addr, dev, dev->dev_addr); 252 ret = rdma_copy_addr(addr, dev, dev->dev_addr);
249 } else if (LOOPBACK(src_ip)) { 253 } else if (LOOPBACK(src_ip)) {
250 ret = rdma_translate_ip((struct sockaddr *)dst_in, addr); 254 ret = rdma_translate_ip((struct sockaddr *)dst_in, addr);
251 if (!ret) 255 if (!ret)
@@ -327,10 +331,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
327} 331}
328EXPORT_SYMBOL(rdma_addr_cancel); 332EXPORT_SYMBOL(rdma_addr_cancel);
329 333
330static int netevent_callback(struct notifier_block *self, unsigned long event, 334static int netevent_callback(struct notifier_block *self, unsigned long event,
331 void *ctx) 335 void *ctx)
332{ 336{
333 if (event == NETEVENT_NEIGH_UPDATE) { 337 if (event == NETEVENT_NEIGH_UPDATE) {
334 struct neighbour *neigh = ctx; 338 struct neighbour *neigh = ctx;
335 339
336 if (neigh->dev->type == ARPHRD_INFINIBAND && 340 if (neigh->dev->type == ARPHRD_INFINIBAND &&
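
rdma_copy_addr() is exported above so that non-IB code can fill an rdma_dev_addr straight from a net_device; the iWARP connection-request handling in cma.c further down uses it exactly that way together with ip_dev_find(). A hedged sketch of that combination, with a hypothetical wrapper name and assuming the prototype is visible via <rdma/ib_addr.h>:

	/*
	 * Sketch: resolve the local net_device for an IPv4 address and copy
	 * its link-level addressing into an rdma_dev_addr.  my_fill_dev_addr()
	 * is a hypothetical wrapper; the calls mirror the cma.c usage below.
	 */
	#include <linux/errno.h>
	#include <linux/inetdevice.h>
	#include <linux/netdevice.h>
	#include <rdma/ib_addr.h>

	static int my_fill_dev_addr(struct rdma_dev_addr *dev_addr, __be32 local_ip)
	{
		struct net_device *dev;
		int ret;

		dev = ip_dev_find(local_ip);	/* takes a reference on the device */
		if (!dev)
			return -EADDRNOTAVAIL;

		/* NULL destination is accepted, as in the rdma_copy_addr(..., NULL)
		 * calls elsewhere in this patch; only local fields are filled. */
		ret = rdma_copy_addr(dev_addr, dev, NULL);

		dev_put(dev);
		return ret;
	}
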
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 75313ade2e0d..20e9f64e67a6 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -62,12 +62,13 @@ struct ib_update_work {
62 62
63static inline int start_port(struct ib_device *device) 63static inline int start_port(struct ib_device *device)
64{ 64{
65 return device->node_type == IB_NODE_SWITCH ? 0 : 1; 65 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
66} 66}
67 67
68static inline int end_port(struct ib_device *device) 68static inline int end_port(struct ib_device *device)
69{ 69{
70 return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt; 70 return (device->node_type == RDMA_NODE_IB_SWITCH) ?
71 0 : device->phys_port_cnt;
71} 72}
72 73
73int ib_get_cached_gid(struct ib_device *device, 74int ib_get_cached_gid(struct ib_device *device,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 0de335b7bfc2..f35fcc4c0638 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. 2 * Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
@@ -41,6 +41,7 @@
41#include <linux/idr.h> 41#include <linux/idr.h>
42#include <linux/interrupt.h> 42#include <linux/interrupt.h>
43#include <linux/pci.h> 43#include <linux/pci.h>
44#include <linux/random.h>
44#include <linux/rbtree.h> 45#include <linux/rbtree.h>
45#include <linux/spinlock.h> 46#include <linux/spinlock.h>
46#include <linux/workqueue.h> 47#include <linux/workqueue.h>
@@ -73,6 +74,7 @@ static struct ib_cm {
73 struct rb_root remote_id_table; 74 struct rb_root remote_id_table;
74 struct rb_root remote_sidr_table; 75 struct rb_root remote_sidr_table;
75 struct idr local_id_table; 76 struct idr local_id_table;
77 __be32 random_id_operand;
76 struct workqueue_struct *wq; 78 struct workqueue_struct *wq;
77} cm; 79} cm;
78 80
@@ -177,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
177 if (IS_ERR(ah)) 179 if (IS_ERR(ah))
178 return PTR_ERR(ah); 180 return PTR_ERR(ah);
179 181
180 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 182 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
181 cm_id_priv->av.pkey_index, 183 cm_id_priv->av.pkey_index,
182 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 184 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
183 GFP_ATOMIC); 185 GFP_ATOMIC);
@@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
299static int cm_alloc_id(struct cm_id_private *cm_id_priv) 301static int cm_alloc_id(struct cm_id_private *cm_id_priv)
300{ 302{
301 unsigned long flags; 303 unsigned long flags;
302 int ret; 304 int ret, id;
303 static int next_id; 305 static int next_id;
304 306
305 do { 307 do {
306 spin_lock_irqsave(&cm.lock, flags); 308 spin_lock_irqsave(&cm.lock, flags);
307 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++, 309 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
308 (__force int *) &cm_id_priv->id.local_id); 310 next_id++, &id);
309 spin_unlock_irqrestore(&cm.lock, flags); 311 spin_unlock_irqrestore(&cm.lock, flags);
310 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); 312 } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
313
314 cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
311 return ret; 315 return ret;
312} 316}
313 317
@@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id)
316 unsigned long flags; 320 unsigned long flags;
317 321
318 spin_lock_irqsave(&cm.lock, flags); 322 spin_lock_irqsave(&cm.lock, flags);
319 idr_remove(&cm.local_id_table, (__force int) local_id); 323 idr_remove(&cm.local_id_table,
324 (__force int) (local_id ^ cm.random_id_operand));
320 spin_unlock_irqrestore(&cm.lock, flags); 325 spin_unlock_irqrestore(&cm.lock, flags);
321} 326}
322 327
@@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
324{ 329{
325 struct cm_id_private *cm_id_priv; 330 struct cm_id_private *cm_id_priv;
326 331
327 cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id); 332 cm_id_priv = idr_find(&cm.local_id_table,
333 (__force int) (local_id ^ cm.random_id_operand));
328 if (cm_id_priv) { 334 if (cm_id_priv) {
329 if (cm_id_priv->id.remote_id == remote_id) 335 if (cm_id_priv->id.remote_id == remote_id)
330 atomic_inc(&cm_id_priv->refcount); 336 atomic_inc(&cm_id_priv->refcount);
@@ -679,6 +685,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
679{ 685{
680 int wait_time; 686 int wait_time;
681 687
688 cm_cleanup_timewait(cm_id_priv->timewait_info);
689
682 /* 690 /*
683 * The cm_id could be destroyed by the user before we exit timewait. 691 * The cm_id could be destroyed by the user before we exit timewait.
684 * To protect against this, we search for the cm_id after exiting 692 * To protect against this, we search for the cm_id after exiting
@@ -1354,7 +1362,7 @@ static int cm_req_handler(struct cm_work *work)
1354 id.local_id); 1362 id.local_id);
1355 if (IS_ERR(cm_id_priv->timewait_info)) { 1363 if (IS_ERR(cm_id_priv->timewait_info)) {
1356 ret = PTR_ERR(cm_id_priv->timewait_info); 1364 ret = PTR_ERR(cm_id_priv->timewait_info);
1357 goto error1; 1365 goto destroy;
1358 } 1366 }
1359 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; 1367 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1360 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; 1368 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
@@ -1363,7 +1371,8 @@ static int cm_req_handler(struct cm_work *work)
1363 listen_cm_id_priv = cm_match_req(work, cm_id_priv); 1371 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1364 if (!listen_cm_id_priv) { 1372 if (!listen_cm_id_priv) {
1365 ret = -EINVAL; 1373 ret = -EINVAL;
1366 goto error2; 1374 kfree(cm_id_priv->timewait_info);
1375 goto destroy;
1367 } 1376 }
1368 1377
1369 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; 1378 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1373,12 +1382,22 @@ static int cm_req_handler(struct cm_work *work)
1373 1382
1374 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); 1383 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1375 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1384 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1376 if (ret) 1385 if (ret) {
1377 goto error3; 1386 ib_get_cached_gid(work->port->cm_dev->device,
1387 work->port->port_num, 0, &work->path[0].sgid);
1388 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1389 &work->path[0].sgid, sizeof work->path[0].sgid,
1390 NULL, 0);
1391 goto rejected;
1392 }
1378 if (req_msg->alt_local_lid) { 1393 if (req_msg->alt_local_lid) {
1379 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); 1394 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1380 if (ret) 1395 if (ret) {
1381 goto error3; 1396 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1397 &work->path[0].sgid,
1398 sizeof work->path[0].sgid, NULL, 0);
1399 goto rejected;
1400 }
1382 } 1401 }
1383 cm_id_priv->tid = req_msg->hdr.tid; 1402 cm_id_priv->tid = req_msg->hdr.tid;
1384 cm_id_priv->timeout_ms = cm_convert_to_ms( 1403 cm_id_priv->timeout_ms = cm_convert_to_ms(
@@ -1400,12 +1419,11 @@ static int cm_req_handler(struct cm_work *work)
1400 cm_deref_id(listen_cm_id_priv); 1419 cm_deref_id(listen_cm_id_priv);
1401 return 0; 1420 return 0;
1402 1421
1403error3: atomic_dec(&cm_id_priv->refcount); 1422rejected:
1423 atomic_dec(&cm_id_priv->refcount);
1404 cm_deref_id(listen_cm_id_priv); 1424 cm_deref_id(listen_cm_id_priv);
1405 cm_cleanup_timewait(cm_id_priv->timewait_info); 1425destroy:
1406error2: kfree(cm_id_priv->timewait_info); 1426 ib_destroy_cm_id(cm_id);
1407 cm_id_priv->timewait_info = NULL;
1408error1: ib_destroy_cm_id(&cm_id_priv->id);
1409 return ret; 1427 return ret;
1410} 1428}
1411 1429
@@ -2072,8 +2090,9 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2072 spin_unlock_irqrestore(&cm.lock, flags); 2090 spin_unlock_irqrestore(&cm.lock, flags);
2073 return NULL; 2091 return NULL;
2074 } 2092 }
2075 cm_id_priv = idr_find(&cm.local_id_table, 2093 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2076 (__force int) timewait_info->work.local_id); 2094 (timewait_info->work.local_id ^
2095 cm.random_id_operand));
2077 if (cm_id_priv) { 2096 if (cm_id_priv) {
2078 if (cm_id_priv->id.remote_id == remote_id) 2097 if (cm_id_priv->id.remote_id == remote_id)
2079 atomic_inc(&cm_id_priv->refcount); 2098 atomic_inc(&cm_id_priv->refcount);
@@ -3125,7 +3144,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3125 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | 3144 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
3126 IB_ACCESS_REMOTE_WRITE; 3145 IB_ACCESS_REMOTE_WRITE;
3127 if (cm_id_priv->responder_resources) 3146 if (cm_id_priv->responder_resources)
3128 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ; 3147 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3148 IB_ACCESS_REMOTE_ATOMIC;
3129 qp_attr->pkey_index = cm_id_priv->av.pkey_index; 3149 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3130 qp_attr->port_num = cm_id_priv->av.port->port_num; 3150 qp_attr->port_num = cm_id_priv->av.port->port_num;
3131 ret = 0; 3151 ret = 0;
@@ -3262,6 +3282,9 @@ static void cm_add_one(struct ib_device *device)
3262 int ret; 3282 int ret;
3263 u8 i; 3283 u8 i;
3264 3284
3285 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
3286 return;
3287
3265 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * 3288 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3266 device->phys_port_cnt, GFP_KERNEL); 3289 device->phys_port_cnt, GFP_KERNEL);
3267 if (!cm_dev) 3290 if (!cm_dev)
@@ -3349,6 +3372,7 @@ static int __init ib_cm_init(void)
3349 cm.remote_qp_table = RB_ROOT; 3372 cm.remote_qp_table = RB_ROOT;
3350 cm.remote_sidr_table = RB_ROOT; 3373 cm.remote_sidr_table = RB_ROOT;
3351 idr_init(&cm.local_id_table); 3374 idr_init(&cm.local_id_table);
3375 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
3352 idr_pre_get(&cm.local_id_table, GFP_KERNEL); 3376 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3353 3377
3354 cm.wq = create_workqueue("ib_cm"); 3378 cm.wq = create_workqueue("ib_cm");
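
The random_id_operand introduced above makes local CM IDs non-sequential: the value handed out is the IDR index XORed with a per-boot random constant, and every lookup or removal first undoes the XOR. A minimal sketch of the pattern detached from the CM structures (my_table, my_operand and the helper names are illustrative):

	/*
	 * Sketch of the obscured-ID allocation pattern: users see the IDR
	 * index XORed with a per-boot random operand, so consecutive
	 * allocations do not yield predictable identifiers.  As in the patch,
	 * my_operand is filled once with get_random_bytes() and an initial
	 * idr_pre_get() primes the allocator at init time.
	 */
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);
	static DEFINE_IDR(my_table);
	static u32 my_operand;		/* randomized once at init */

	static int my_alloc_id(void *ptr, u32 *public_id)
	{
		unsigned long flags;
		int ret, id;

		do {
			spin_lock_irqsave(&my_lock, flags);
			ret = idr_get_new_above(&my_table, ptr, 1, &id);
			spin_unlock_irqrestore(&my_lock, flags);
		} while (ret == -EAGAIN && idr_pre_get(&my_table, GFP_KERNEL));

		if (!ret)
			*public_id = (u32)id ^ my_operand;	/* hand out obscured value */
		return ret;
	}

	static void *my_lookup(u32 public_id)
	{
		unsigned long flags;
		void *ptr;

		spin_lock_irqsave(&my_lock, flags);
		ptr = idr_find(&my_table, (int)(public_id ^ my_operand));	/* undo XOR */
		spin_unlock_irqrestore(&my_lock, flags);
		return ptr;
	}
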
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5d625a81193f..1178bd434d1b 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -35,6 +35,7 @@
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/random.h> 36#include <linux/random.h>
37#include <linux/idr.h> 37#include <linux/idr.h>
38#include <linux/inetdevice.h>
38 39
39#include <net/tcp.h> 40#include <net/tcp.h>
40 41
@@ -43,6 +44,7 @@
43#include <rdma/ib_cache.h> 44#include <rdma/ib_cache.h>
44#include <rdma/ib_cm.h> 45#include <rdma/ib_cm.h>
45#include <rdma/ib_sa.h> 46#include <rdma/ib_sa.h>
47#include <rdma/iw_cm.h>
46 48
47MODULE_AUTHOR("Sean Hefty"); 49MODULE_AUTHOR("Sean Hefty");
48MODULE_DESCRIPTION("Generic RDMA CM Agent"); 50MODULE_DESCRIPTION("Generic RDMA CM Agent");
@@ -60,6 +62,7 @@ static struct ib_client cma_client = {
60 .remove = cma_remove_one 62 .remove = cma_remove_one
61}; 63};
62 64
65static struct ib_sa_client sa_client;
63static LIST_HEAD(dev_list); 66static LIST_HEAD(dev_list);
64static LIST_HEAD(listen_any_list); 67static LIST_HEAD(listen_any_list);
65static DEFINE_MUTEX(lock); 68static DEFINE_MUTEX(lock);
@@ -124,6 +127,7 @@ struct rdma_id_private {
124 int query_id; 127 int query_id;
125 union { 128 union {
126 struct ib_cm_id *ib; 129 struct ib_cm_id *ib;
130 struct iw_cm_id *iw;
127 } cm_id; 131 } cm_id;
128 132
129 u32 seq_num; 133 u32 seq_num;
@@ -259,15 +263,24 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv)
259 id_priv->cma_dev = NULL; 263 id_priv->cma_dev = NULL;
260} 264}
261 265
262static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) 266static int cma_acquire_dev(struct rdma_id_private *id_priv)
263{ 267{
268 enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
264 struct cma_device *cma_dev; 269 struct cma_device *cma_dev;
265 union ib_gid gid; 270 union ib_gid gid;
266 int ret = -ENODEV; 271 int ret = -ENODEV;
267 272
268 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid), 273 switch (rdma_node_get_transport(dev_type)) {
274 case RDMA_TRANSPORT_IB:
275 ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
276 break;
277 case RDMA_TRANSPORT_IWARP:
278 iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
279 break;
280 default:
281 return -ENODEV;
282 }
269 283
270 mutex_lock(&lock);
271 list_for_each_entry(cma_dev, &dev_list, list) { 284 list_for_each_entry(cma_dev, &dev_list, list) {
272 ret = ib_find_cached_gid(cma_dev->device, &gid, 285 ret = ib_find_cached_gid(cma_dev->device, &gid,
273 &id_priv->id.port_num, NULL); 286 &id_priv->id.port_num, NULL);
@@ -276,20 +289,9 @@ static int cma_acquire_ib_dev(struct rdma_id_private *id_priv)
276 break; 289 break;
277 } 290 }
278 } 291 }
279 mutex_unlock(&lock);
280 return ret; 292 return ret;
281} 293}
282 294
283static int cma_acquire_dev(struct rdma_id_private *id_priv)
284{
285 switch (id_priv->id.route.addr.dev_addr.dev_type) {
286 case IB_NODE_CA:
287 return cma_acquire_ib_dev(id_priv);
288 default:
289 return -ENODEV;
290 }
291}
292
293static void cma_deref_id(struct rdma_id_private *id_priv) 295static void cma_deref_id(struct rdma_id_private *id_priv)
294{ 296{
295 if (atomic_dec_and_test(&id_priv->refcount)) 297 if (atomic_dec_and_test(&id_priv->refcount))
@@ -347,6 +349,16 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
347 IB_QP_PKEY_INDEX | IB_QP_PORT); 349 IB_QP_PKEY_INDEX | IB_QP_PORT);
348} 350}
349 351
352static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
353{
354 struct ib_qp_attr qp_attr;
355
356 qp_attr.qp_state = IB_QPS_INIT;
357 qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
358
359 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
360}
361
350int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, 362int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
351 struct ib_qp_init_attr *qp_init_attr) 363 struct ib_qp_init_attr *qp_init_attr)
352{ 364{
@@ -362,10 +374,13 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
362 if (IS_ERR(qp)) 374 if (IS_ERR(qp))
363 return PTR_ERR(qp); 375 return PTR_ERR(qp);
364 376
365 switch (id->device->node_type) { 377 switch (rdma_node_get_transport(id->device->node_type)) {
366 case IB_NODE_CA: 378 case RDMA_TRANSPORT_IB:
367 ret = cma_init_ib_qp(id_priv, qp); 379 ret = cma_init_ib_qp(id_priv, qp);
368 break; 380 break;
381 case RDMA_TRANSPORT_IWARP:
382 ret = cma_init_iw_qp(id_priv, qp);
383 break;
369 default: 384 default:
370 ret = -ENOSYS; 385 ret = -ENOSYS;
371 break; 386 break;
@@ -451,13 +466,17 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
451 int ret; 466 int ret;
452 467
453 id_priv = container_of(id, struct rdma_id_private, id); 468 id_priv = container_of(id, struct rdma_id_private, id);
454 switch (id_priv->id.device->node_type) { 469 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
455 case IB_NODE_CA: 470 case RDMA_TRANSPORT_IB:
456 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, 471 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
457 qp_attr_mask); 472 qp_attr_mask);
458 if (qp_attr->qp_state == IB_QPS_RTR) 473 if (qp_attr->qp_state == IB_QPS_RTR)
459 qp_attr->rq_psn = id_priv->seq_num; 474 qp_attr->rq_psn = id_priv->seq_num;
460 break; 475 break;
476 case RDMA_TRANSPORT_IWARP:
477 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
478 qp_attr_mask);
479 break;
461 default: 480 default:
462 ret = -ENOSYS; 481 ret = -ENOSYS;
463 break; 482 break;
@@ -590,8 +609,8 @@ static int cma_notify_user(struct rdma_id_private *id_priv,
590 609
591static void cma_cancel_route(struct rdma_id_private *id_priv) 610static void cma_cancel_route(struct rdma_id_private *id_priv)
592{ 611{
593 switch (id_priv->id.device->node_type) { 612 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
594 case IB_NODE_CA: 613 case RDMA_TRANSPORT_IB:
595 if (id_priv->query) 614 if (id_priv->query)
596 ib_sa_cancel_query(id_priv->query_id, id_priv->query); 615 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
597 break; 616 break;
@@ -611,11 +630,15 @@ static void cma_destroy_listen(struct rdma_id_private *id_priv)
611 cma_exch(id_priv, CMA_DESTROYING); 630 cma_exch(id_priv, CMA_DESTROYING);
612 631
613 if (id_priv->cma_dev) { 632 if (id_priv->cma_dev) {
614 switch (id_priv->id.device->node_type) { 633 switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
615 case IB_NODE_CA: 634 case RDMA_TRANSPORT_IB:
616 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) 635 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
617 ib_destroy_cm_id(id_priv->cm_id.ib); 636 ib_destroy_cm_id(id_priv->cm_id.ib);
618 break; 637 break;
638 case RDMA_TRANSPORT_IWARP:
639 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
640 iw_destroy_cm_id(id_priv->cm_id.iw);
641 break;
619 default: 642 default:
620 break; 643 break;
621 } 644 }
@@ -689,19 +712,25 @@ void rdma_destroy_id(struct rdma_cm_id *id)
689 state = cma_exch(id_priv, CMA_DESTROYING); 712 state = cma_exch(id_priv, CMA_DESTROYING);
690 cma_cancel_operation(id_priv, state); 713 cma_cancel_operation(id_priv, state);
691 714
715 mutex_lock(&lock);
692 if (id_priv->cma_dev) { 716 if (id_priv->cma_dev) {
693 switch (id->device->node_type) { 717 mutex_unlock(&lock);
694 case IB_NODE_CA: 718 switch (rdma_node_get_transport(id->device->node_type)) {
695 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) 719 case RDMA_TRANSPORT_IB:
720 if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
696 ib_destroy_cm_id(id_priv->cm_id.ib); 721 ib_destroy_cm_id(id_priv->cm_id.ib);
697 break; 722 break;
723 case RDMA_TRANSPORT_IWARP:
724 if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
725 iw_destroy_cm_id(id_priv->cm_id.iw);
726 break;
698 default: 727 default:
699 break; 728 break;
700 } 729 }
701 mutex_lock(&lock); 730 mutex_lock(&lock);
702 cma_detach_from_dev(id_priv); 731 cma_detach_from_dev(id_priv);
703 mutex_unlock(&lock);
704 } 732 }
733 mutex_unlock(&lock);
705 734
706 cma_release_port(id_priv); 735 cma_release_port(id_priv);
707 cma_deref_id(id_priv); 736 cma_deref_id(id_priv);
@@ -869,7 +898,7 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
869 ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 898 ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
870 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 899 ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
871 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 900 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
872 rt->addr.dev_addr.dev_type = IB_NODE_CA; 901 rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;
873 902
874 id_priv = container_of(id, struct rdma_id_private, id); 903 id_priv = container_of(id, struct rdma_id_private, id);
875 id_priv->state = CMA_CONNECT; 904 id_priv->state = CMA_CONNECT;
@@ -898,7 +927,9 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
898 } 927 }
899 928
900 atomic_inc(&conn_id->dev_remove); 929 atomic_inc(&conn_id->dev_remove);
901 ret = cma_acquire_ib_dev(conn_id); 930 mutex_lock(&lock);
931 ret = cma_acquire_dev(conn_id);
932 mutex_unlock(&lock);
902 if (ret) { 933 if (ret) {
903 ret = -ENODEV; 934 ret = -ENODEV;
904 cma_release_remove(conn_id); 935 cma_release_remove(conn_id);
@@ -982,6 +1013,130 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
982 } 1013 }
983} 1014}
984 1015
1016static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1017{
1018 struct rdma_id_private *id_priv = iw_id->context;
1019 enum rdma_cm_event_type event = 0;
1020 struct sockaddr_in *sin;
1021 int ret = 0;
1022
1023 atomic_inc(&id_priv->dev_remove);
1024
1025 switch (iw_event->event) {
1026 case IW_CM_EVENT_CLOSE:
1027 event = RDMA_CM_EVENT_DISCONNECTED;
1028 break;
1029 case IW_CM_EVENT_CONNECT_REPLY:
1030 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1031 *sin = iw_event->local_addr;
1032 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1033 *sin = iw_event->remote_addr;
1034 if (iw_event->status)
1035 event = RDMA_CM_EVENT_REJECTED;
1036 else
1037 event = RDMA_CM_EVENT_ESTABLISHED;
1038 break;
1039 case IW_CM_EVENT_ESTABLISHED:
1040 event = RDMA_CM_EVENT_ESTABLISHED;
1041 break;
1042 default:
1043 BUG_ON(1);
1044 }
1045
1046 ret = cma_notify_user(id_priv, event, iw_event->status,
1047 iw_event->private_data,
1048 iw_event->private_data_len);
1049 if (ret) {
1050 /* Destroy the CM ID by returning a non-zero value. */
1051 id_priv->cm_id.iw = NULL;
1052 cma_exch(id_priv, CMA_DESTROYING);
1053 cma_release_remove(id_priv);
1054 rdma_destroy_id(&id_priv->id);
1055 return ret;
1056 }
1057
1058 cma_release_remove(id_priv);
1059 return ret;
1060}
1061
1062static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1063 struct iw_cm_event *iw_event)
1064{
1065 struct rdma_cm_id *new_cm_id;
1066 struct rdma_id_private *listen_id, *conn_id;
1067 struct sockaddr_in *sin;
1068 struct net_device *dev = NULL;
1069 int ret;
1070
1071 listen_id = cm_id->context;
1072 atomic_inc(&listen_id->dev_remove);
1073 if (!cma_comp(listen_id, CMA_LISTEN)) {
1074 ret = -ECONNABORTED;
1075 goto out;
1076 }
1077
1078 /* Create a new RDMA id for the new IW CM ID */
1079 new_cm_id = rdma_create_id(listen_id->id.event_handler,
1080 listen_id->id.context,
1081 RDMA_PS_TCP);
1082 if (!new_cm_id) {
1083 ret = -ENOMEM;
1084 goto out;
1085 }
1086 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
1087 atomic_inc(&conn_id->dev_remove);
1088 conn_id->state = CMA_CONNECT;
1089
1090 dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
1091 if (!dev) {
1092 ret = -EADDRNOTAVAIL;
1093 cma_release_remove(conn_id);
1094 rdma_destroy_id(new_cm_id);
1095 goto out;
1096 }
1097 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1098 if (ret) {
1099 cma_release_remove(conn_id);
1100 rdma_destroy_id(new_cm_id);
1101 goto out;
1102 }
1103
1104 mutex_lock(&lock);
1105 ret = cma_acquire_dev(conn_id);
1106 mutex_unlock(&lock);
1107 if (ret) {
1108 cma_release_remove(conn_id);
1109 rdma_destroy_id(new_cm_id);
1110 goto out;
1111 }
1112
1113 conn_id->cm_id.iw = cm_id;
1114 cm_id->context = conn_id;
1115 cm_id->cm_handler = cma_iw_handler;
1116
1117 sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
1118 *sin = iw_event->local_addr;
1119 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
1120 *sin = iw_event->remote_addr;
1121
1122 ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
1123 iw_event->private_data,
1124 iw_event->private_data_len);
1125 if (ret) {
1126 /* User wants to destroy the CM ID */
1127 conn_id->cm_id.iw = NULL;
1128 cma_exch(conn_id, CMA_DESTROYING);
1129 cma_release_remove(conn_id);
1130 rdma_destroy_id(&conn_id->id);
1131 }
1132
1133out:
1134 if (dev)
1135 dev_put(dev);
1136 cma_release_remove(listen_id);
1137 return ret;
1138}
1139
985static int cma_ib_listen(struct rdma_id_private *id_priv) 1140static int cma_ib_listen(struct rdma_id_private *id_priv)
986{ 1141{
987 struct ib_cm_compare_data compare_data; 1142 struct ib_cm_compare_data compare_data;
@@ -1011,6 +1166,30 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
1011 return ret; 1166 return ret;
1012} 1167}
1013 1168
1169static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
1170{
1171 int ret;
1172 struct sockaddr_in *sin;
1173
1174 id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
1175 iw_conn_req_handler,
1176 id_priv);
1177 if (IS_ERR(id_priv->cm_id.iw))
1178 return PTR_ERR(id_priv->cm_id.iw);
1179
1180 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1181 id_priv->cm_id.iw->local_addr = *sin;
1182
1183 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
1184
1185 if (ret) {
1186 iw_destroy_cm_id(id_priv->cm_id.iw);
1187 id_priv->cm_id.iw = NULL;
1188 }
1189
1190 return ret;
1191}
1192
1014static int cma_listen_handler(struct rdma_cm_id *id, 1193static int cma_listen_handler(struct rdma_cm_id *id,
1015 struct rdma_cm_event *event) 1194 struct rdma_cm_event *event)
1016{ 1195{
@@ -1087,12 +1266,17 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
1087 1266
1088 id_priv->backlog = backlog; 1267 id_priv->backlog = backlog;
1089 if (id->device) { 1268 if (id->device) {
1090 switch (id->device->node_type) { 1269 switch (rdma_node_get_transport(id->device->node_type)) {
1091 case IB_NODE_CA: 1270 case RDMA_TRANSPORT_IB:
1092 ret = cma_ib_listen(id_priv); 1271 ret = cma_ib_listen(id_priv);
1093 if (ret) 1272 if (ret)
1094 goto err; 1273 goto err;
1095 break; 1274 break;
1275 case RDMA_TRANSPORT_IWARP:
1276 ret = cma_iw_listen(id_priv, backlog);
1277 if (ret)
1278 goto err;
1279 break;
1096 default: 1280 default:
1097 ret = -ENOSYS; 1281 ret = -ENOSYS;
1098 goto err; 1282 goto err;
@@ -1140,7 +1324,7 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1140 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); 1324 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
1141 path_rec.numb_path = 1; 1325 path_rec.numb_path = 1;
1142 1326
1143 id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device, 1327 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1144 id_priv->id.port_num, &path_rec, 1328 id_priv->id.port_num, &path_rec,
1145 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 1329 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1146 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH, 1330 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
@@ -1231,6 +1415,23 @@ err:
1231} 1415}
1232EXPORT_SYMBOL(rdma_set_ib_paths); 1416EXPORT_SYMBOL(rdma_set_ib_paths);
1233 1417
1418static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1419{
1420 struct cma_work *work;
1421
1422 work = kzalloc(sizeof *work, GFP_KERNEL);
1423 if (!work)
1424 return -ENOMEM;
1425
1426 work->id = id_priv;
1427 INIT_WORK(&work->work, cma_work_handler, work);
1428 work->old_state = CMA_ROUTE_QUERY;
1429 work->new_state = CMA_ROUTE_RESOLVED;
1430 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1431 queue_work(cma_wq, &work->work);
1432 return 0;
1433}
1434
1234int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) 1435int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1235{ 1436{
1236 struct rdma_id_private *id_priv; 1437 struct rdma_id_private *id_priv;
@@ -1241,10 +1442,13 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1241 return -EINVAL; 1442 return -EINVAL;
1242 1443
1243 atomic_inc(&id_priv->refcount); 1444 atomic_inc(&id_priv->refcount);
1244 switch (id->device->node_type) { 1445 switch (rdma_node_get_transport(id->device->node_type)) {
1245 case IB_NODE_CA: 1446 case RDMA_TRANSPORT_IB:
1246 ret = cma_resolve_ib_route(id_priv, timeout_ms); 1447 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1247 break; 1448 break;
1449 case RDMA_TRANSPORT_IWARP:
1450 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1451 break;
1248 default: 1452 default:
1249 ret = -ENOSYS; 1453 ret = -ENOSYS;
1250 break; 1454 break;
@@ -1309,16 +1513,26 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1309 enum rdma_cm_event_type event; 1513 enum rdma_cm_event_type event;
1310 1514
1311 atomic_inc(&id_priv->dev_remove); 1515 atomic_inc(&id_priv->dev_remove);
1312 if (!id_priv->cma_dev && !status) 1516
1517 /*
1518 * Grab mutex to block rdma_destroy_id() from removing the device while
1519 * we're trying to acquire it.
1520 */
1521 mutex_lock(&lock);
1522 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
1523 mutex_unlock(&lock);
1524 goto out;
1525 }
1526
1527 if (!status && !id_priv->cma_dev)
1313 status = cma_acquire_dev(id_priv); 1528 status = cma_acquire_dev(id_priv);
1529 mutex_unlock(&lock);
1314 1530
1315 if (status) { 1531 if (status) {
1316 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND)) 1532 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1317 goto out; 1533 goto out;
1318 event = RDMA_CM_EVENT_ADDR_ERROR; 1534 event = RDMA_CM_EVENT_ADDR_ERROR;
1319 } else { 1535 } else {
1320 if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
1321 goto out;
1322 memcpy(&id_priv->id.route.addr.src_addr, src_addr, 1536 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1323 ip_addr_size(src_addr)); 1537 ip_addr_size(src_addr));
1324 event = RDMA_CM_EVENT_ADDR_RESOLVED; 1538 event = RDMA_CM_EVENT_ADDR_RESOLVED;
@@ -1492,7 +1706,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
1492 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { 1706 hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
1493 if (cma_any_addr(&cur_id->id.route.addr.src_addr)) 1707 if (cma_any_addr(&cur_id->id.route.addr.src_addr))
1494 return -EADDRNOTAVAIL; 1708 return -EADDRNOTAVAIL;
1495 1709
1496 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; 1710 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
1497 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) 1711 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
1498 return -EADDRINUSE; 1712 return -EADDRINUSE;
@@ -1542,8 +1756,11 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
1542 1756
1543 if (!cma_any_addr(addr)) { 1757 if (!cma_any_addr(addr)) {
1544 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 1758 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
1545 if (!ret) 1759 if (!ret) {
1760 mutex_lock(&lock);
1546 ret = cma_acquire_dev(id_priv); 1761 ret = cma_acquire_dev(id_priv);
1762 mutex_unlock(&lock);
1763 }
1547 if (ret) 1764 if (ret)
1548 goto err; 1765 goto err;
1549 } 1766 }
@@ -1649,6 +1866,47 @@ out:
1649 return ret; 1866 return ret;
1650} 1867}
1651 1868
1869static int cma_connect_iw(struct rdma_id_private *id_priv,
1870 struct rdma_conn_param *conn_param)
1871{
1872 struct iw_cm_id *cm_id;
1873 struct sockaddr_in* sin;
1874 int ret;
1875 struct iw_cm_conn_param iw_param;
1876
1877 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
1878 if (IS_ERR(cm_id)) {
1879 ret = PTR_ERR(cm_id);
1880 goto out;
1881 }
1882
1883 id_priv->cm_id.iw = cm_id;
1884
1885 sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
1886 cm_id->local_addr = *sin;
1887
1888 sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
1889 cm_id->remote_addr = *sin;
1890
1891 ret = cma_modify_qp_rtr(&id_priv->id);
1892 if (ret) {
1893 iw_destroy_cm_id(cm_id);
1894 return ret;
1895 }
1896
1897 iw_param.ord = conn_param->initiator_depth;
1898 iw_param.ird = conn_param->responder_resources;
1899 iw_param.private_data = conn_param->private_data;
1900 iw_param.private_data_len = conn_param->private_data_len;
1901 if (id_priv->id.qp)
1902 iw_param.qpn = id_priv->qp_num;
1903 else
1904 iw_param.qpn = conn_param->qp_num;
1905 ret = iw_cm_connect(cm_id, &iw_param);
1906out:
1907 return ret;
1908}
1909
1652int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 1910int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1653{ 1911{
1654 struct rdma_id_private *id_priv; 1912 struct rdma_id_private *id_priv;
@@ -1664,10 +1922,13 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1664 id_priv->srq = conn_param->srq; 1922 id_priv->srq = conn_param->srq;
1665 } 1923 }
1666 1924
1667 switch (id->device->node_type) { 1925 switch (rdma_node_get_transport(id->device->node_type)) {
1668 case IB_NODE_CA: 1926 case RDMA_TRANSPORT_IB:
1669 ret = cma_connect_ib(id_priv, conn_param); 1927 ret = cma_connect_ib(id_priv, conn_param);
1670 break; 1928 break;
1929 case RDMA_TRANSPORT_IWARP:
1930 ret = cma_connect_iw(id_priv, conn_param);
1931 break;
1671 default: 1932 default:
1672 ret = -ENOSYS; 1933 ret = -ENOSYS;
1673 break; 1934 break;
@@ -1708,6 +1969,28 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
1708 return ib_send_cm_rep(id_priv->cm_id.ib, &rep); 1969 return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
1709} 1970}
1710 1971
1972static int cma_accept_iw(struct rdma_id_private *id_priv,
1973 struct rdma_conn_param *conn_param)
1974{
1975 struct iw_cm_conn_param iw_param;
1976 int ret;
1977
1978 ret = cma_modify_qp_rtr(&id_priv->id);
1979 if (ret)
1980 return ret;
1981
1982 iw_param.ord = conn_param->initiator_depth;
1983 iw_param.ird = conn_param->responder_resources;
1984 iw_param.private_data = conn_param->private_data;
1985 iw_param.private_data_len = conn_param->private_data_len;
1986 if (id_priv->id.qp) {
1987 iw_param.qpn = id_priv->qp_num;
1988 } else
1989 iw_param.qpn = conn_param->qp_num;
1990
1991 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
1992}
1993
1711int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 1994int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1712{ 1995{
1713 struct rdma_id_private *id_priv; 1996 struct rdma_id_private *id_priv;
@@ -1723,13 +2006,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1723 id_priv->srq = conn_param->srq; 2006 id_priv->srq = conn_param->srq;
1724 } 2007 }
1725 2008
1726 switch (id->device->node_type) { 2009 switch (rdma_node_get_transport(id->device->node_type)) {
1727 case IB_NODE_CA: 2010 case RDMA_TRANSPORT_IB:
1728 if (conn_param) 2011 if (conn_param)
1729 ret = cma_accept_ib(id_priv, conn_param); 2012 ret = cma_accept_ib(id_priv, conn_param);
1730 else 2013 else
1731 ret = cma_rep_recv(id_priv); 2014 ret = cma_rep_recv(id_priv);
1732 break; 2015 break;
2016 case RDMA_TRANSPORT_IWARP:
2017 ret = cma_accept_iw(id_priv, conn_param);
2018 break;
1733 default: 2019 default:
1734 ret = -ENOSYS; 2020 ret = -ENOSYS;
1735 break; 2021 break;
@@ -1756,12 +2042,16 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
1756 if (!cma_comp(id_priv, CMA_CONNECT)) 2042 if (!cma_comp(id_priv, CMA_CONNECT))
1757 return -EINVAL; 2043 return -EINVAL;
1758 2044
1759 switch (id->device->node_type) { 2045 switch (rdma_node_get_transport(id->device->node_type)) {
1760 case IB_NODE_CA: 2046 case RDMA_TRANSPORT_IB:
1761 ret = ib_send_cm_rej(id_priv->cm_id.ib, 2047 ret = ib_send_cm_rej(id_priv->cm_id.ib,
1762 IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2048 IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1763 private_data, private_data_len); 2049 private_data, private_data_len);
1764 break; 2050 break;
2051 case RDMA_TRANSPORT_IWARP:
2052 ret = iw_cm_reject(id_priv->cm_id.iw,
2053 private_data, private_data_len);
2054 break;
1765 default: 2055 default:
1766 ret = -ENOSYS; 2056 ret = -ENOSYS;
1767 break; 2057 break;
@@ -1780,17 +2070,20 @@ int rdma_disconnect(struct rdma_cm_id *id)
1780 !cma_comp(id_priv, CMA_DISCONNECT)) 2070 !cma_comp(id_priv, CMA_DISCONNECT))
1781 return -EINVAL; 2071 return -EINVAL;
1782 2072
1783 ret = cma_modify_qp_err(id); 2073 switch (rdma_node_get_transport(id->device->node_type)) {
1784 if (ret) 2074 case RDMA_TRANSPORT_IB:
1785 goto out; 2075 ret = cma_modify_qp_err(id);
1786 2076 if (ret)
1787 switch (id->device->node_type) { 2077 goto out;
1788 case IB_NODE_CA:
1789 /* Initiate or respond to a disconnect. */ 2078 /* Initiate or respond to a disconnect. */
1790 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 2079 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
1791 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 2080 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
1792 break; 2081 break;
2082 case RDMA_TRANSPORT_IWARP:
2083 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
2084 break;
1793 default: 2085 default:
2086 ret = -EINVAL;
1794 break; 2087 break;
1795 } 2088 }
1796out: 2089out:
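
With every dispatch point above keyed on rdma_node_get_transport() instead of the raw node_type, the same rdma_cm consumer code drives both IB and iWARP devices unchanged. Below is a hedged sketch of an active-side flow a kernel consumer might use; the handler name, the RDMA_PS_TCP port space, the timeout values, and the connection parameters are illustrative assumptions, not taken from this patch, and QP setup is elided.

/* Illustrative rdma_cm consumer: the CMA core picks cma_connect_ib() or
 * cma_connect_iw() internally based on the bound device's transport. */
#include <linux/err.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>

static int example_cma_handler(struct rdma_cm_id *id,
                               struct rdma_cm_event *event)
{
        struct rdma_conn_param param;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                return rdma_resolve_route(id, 2000);
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                /* A real consumer would create its QP before connecting. */
                memset(&param, 0, sizeof param);
                param.responder_resources = 1;
                param.initiator_depth = 1;
                param.retry_count = 5;
                return rdma_connect(id, &param);
        case RDMA_CM_EVENT_ESTABLISHED:
                return 0;       /* connection is ready for use */
        default:
                return 0;       /* non-zero would ask the CMA to destroy the id */
        }
}

static int example_connect(struct sockaddr *dst)
{
        struct rdma_cm_id *id;

        id = rdma_create_id(example_cma_handler, NULL, RDMA_PS_TCP);
        if (IS_ERR(id))
                return PTR_ERR(id);

        /* Kicks off the ADDR_RESOLVED -> ROUTE_RESOLVED -> ESTABLISHED
         * sequence handled above. */
        return rdma_resolve_addr(id, NULL, dst, 2000);
}
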
@@ -1907,12 +2200,15 @@ static int cma_init(void)
1907 if (!cma_wq) 2200 if (!cma_wq)
1908 return -ENOMEM; 2201 return -ENOMEM;
1909 2202
2203 ib_sa_register_client(&sa_client);
2204
1910 ret = ib_register_client(&cma_client); 2205 ret = ib_register_client(&cma_client);
1911 if (ret) 2206 if (ret)
1912 goto err; 2207 goto err;
1913 return 0; 2208 return 0;
1914 2209
1915err: 2210err:
2211 ib_sa_unregister_client(&sa_client);
1916 destroy_workqueue(cma_wq); 2212 destroy_workqueue(cma_wq);
1917 return ret; 2213 return ret;
1918} 2214}
@@ -1920,6 +2216,7 @@ err:
1920static void cma_cleanup(void) 2216static void cma_cleanup(void)
1921{ 2217{
1922 ib_unregister_client(&cma_client); 2218 ib_unregister_client(&cma_client);
2219 ib_sa_unregister_client(&sa_client);
1923 destroy_workqueue(cma_wq); 2220 destroy_workqueue(cma_wq);
1924 idr_destroy(&sdp_ps); 2221 idr_destroy(&sdp_ps);
1925 idr_destroy(&tcp_ps); 2222 idr_destroy(&tcp_ps);
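
cma_init() and cma_cleanup() above bracket the module's lifetime with ib_sa_register_client()/ib_sa_unregister_client(), matching the new ib_sa_path_rec_get() signature that takes an ib_sa_client. A minimal sketch of the same pairing for some other SA consumer follows; the module and client names are purely illustrative.

/* Hypothetical SA consumer: register a client for the module's lifetime so
 * outstanding queries can be drained on unload. */
#include <linux/module.h>
#include <rdma/ib_sa.h>

static struct ib_sa_client example_sa_client;

static int __init example_sa_init(void)
{
        ib_sa_register_client(&example_sa_client);
        /* ... pass &example_sa_client to ib_sa_path_rec_get() and friends ... */
        return 0;
}

static void __exit example_sa_exit(void)
{
        /* Blocks until the last query holding a client reference completes. */
        ib_sa_unregister_client(&example_sa_client);
}

module_init(example_sa_init);
module_exit(example_sa_exit);
MODULE_LICENSE("GPL");
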
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index b2f3cb91d9bc..63d2a39fb82c 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -385,7 +385,7 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
385EXPORT_SYMBOL(ib_get_client_data); 385EXPORT_SYMBOL(ib_get_client_data);
386 386
387/** 387/**
388 * ib_set_client_data - Get IB client context 388 * ib_set_client_data - Set IB client context
389 * @device:Device to set context for 389 * @device:Device to set context for
390 * @client:Client to set context for 390 * @client:Client to set context for
391 * @data:Context to set 391 * @data:Context to set
@@ -505,7 +505,7 @@ int ib_query_port(struct ib_device *device,
505 u8 port_num, 505 u8 port_num,
506 struct ib_port_attr *port_attr) 506 struct ib_port_attr *port_attr)
507{ 507{
508 if (device->node_type == IB_NODE_SWITCH) { 508 if (device->node_type == RDMA_NODE_IB_SWITCH) {
509 if (port_num) 509 if (port_num)
510 return -EINVAL; 510 return -EINVAL;
511 } else if (port_num < 1 || port_num > device->phys_port_cnt) 511 } else if (port_num < 1 || port_num > device->phys_port_cnt)
@@ -580,7 +580,7 @@ int ib_modify_port(struct ib_device *device,
580 u8 port_num, int port_modify_mask, 580 u8 port_num, int port_modify_mask,
581 struct ib_port_modify *port_modify) 581 struct ib_port_modify *port_modify)
582{ 582{
583 if (device->node_type == IB_NODE_SWITCH) { 583 if (device->node_type == RDMA_NODE_IB_SWITCH) {
584 if (port_num) 584 if (port_num)
585 return -EINVAL; 585 return -EINVAL;
586 } else if (port_num < 1 || port_num > device->phys_port_cnt) 586 } else if (port_num < 1 || port_num > device->phys_port_cnt)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
new file mode 100644
index 000000000000..c3fb304a4e86
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
7 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 *
37 */
38#include <linux/dma-mapping.h>
39#include <linux/err.h>
40#include <linux/idr.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/rbtree.h>
44#include <linux/spinlock.h>
45#include <linux/workqueue.h>
46#include <linux/completion.h>
47
48#include <rdma/iw_cm.h>
49#include <rdma/ib_addr.h>
50
51#include "iwcm.h"
52
53MODULE_AUTHOR("Tom Tucker");
54MODULE_DESCRIPTION("iWARP CM");
55MODULE_LICENSE("Dual BSD/GPL");
56
57static struct workqueue_struct *iwcm_wq;
58struct iwcm_work {
59 struct work_struct work;
60 struct iwcm_id_private *cm_id;
61 struct list_head list;
62 struct iw_cm_event event;
63 struct list_head free_list;
64};
65
66/*
67 * The following services provide a mechanism for pre-allocating iwcm_work
68 * elements. The design pre-allocates them based on the cm_id type:
69 * LISTENING IDS: Get enough elements preallocated to handle the
70 * listen backlog.
71 * ACTIVE IDS: 4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
72 * PASSIVE IDS: 3: ESTABLISHED, DISCONNECT, CLOSE
73 *
74 * Allocating them in connect and listen avoids having to deal
75 * with allocation failures on the event upcall from the provider (which
76 * is called in the interrupt context).
77 *
78 * One exception is when creating the cm_id for incoming connection requests.
79 * There are two cases:
80 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
81 * the backlog is exceeded, then no more connection request events will
  82 *  be processed. cm_event_handler() returns -ENOMEM in this case. It's up
  83 *  to the provider to reject the connection request.
84 * 2) in the connection request workqueue handler, cm_conn_req_handler().
85 * If work elements cannot be allocated for the new connect request cm_id,
86 * then IWCM will call the provider reject method. This is ok since
87 * cm_conn_req_handler() runs in the workqueue thread context.
88 */
89
90static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
91{
92 struct iwcm_work *work;
93
94 if (list_empty(&cm_id_priv->work_free_list))
95 return NULL;
96 work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
97 free_list);
98 list_del_init(&work->free_list);
99 return work;
100}
101
102static void put_work(struct iwcm_work *work)
103{
104 list_add(&work->free_list, &work->cm_id->work_free_list);
105}
106
107static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
108{
109 struct list_head *e, *tmp;
110
111 list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
112 kfree(list_entry(e, struct iwcm_work, free_list));
113}
114
115static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
116{
117 struct iwcm_work *work;
118
119 BUG_ON(!list_empty(&cm_id_priv->work_free_list));
120 while (count--) {
121 work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
122 if (!work) {
123 dealloc_work_entries(cm_id_priv);
124 return -ENOMEM;
125 }
126 work->cm_id = cm_id_priv;
127 INIT_LIST_HEAD(&work->list);
128 put_work(work);
129 }
130 return 0;
131}
132
133/*
134 * Save private data from incoming connection requests in the
135 * cm_id_priv so the low level driver doesn't have to. Adjust
136 * the event ptr to point to the local copy.
137 */
138static int copy_private_data(struct iwcm_id_private *cm_id_priv,
139 struct iw_cm_event *event)
140{
141 void *p;
142
143 p = kmalloc(event->private_data_len, GFP_ATOMIC);
144 if (!p)
145 return -ENOMEM;
146 memcpy(p, event->private_data, event->private_data_len);
147 event->private_data = p;
148 return 0;
149}
150
151/*
152 * Release a reference on cm_id. If the last reference is being removed
153 * and iw_destroy_cm_id is waiting, wake up the waiting thread.
154 */
155static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
156{
157 int ret = 0;
158
159 BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
160 if (atomic_dec_and_test(&cm_id_priv->refcount)) {
161 BUG_ON(!list_empty(&cm_id_priv->work_list));
162 if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) {
163 BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING);
164 BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY,
165 &cm_id_priv->flags));
166 ret = 1;
167 }
168 complete(&cm_id_priv->destroy_comp);
169 }
170
171 return ret;
172}
173
174static void add_ref(struct iw_cm_id *cm_id)
175{
176 struct iwcm_id_private *cm_id_priv;
177 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
178 atomic_inc(&cm_id_priv->refcount);
179}
180
181static void rem_ref(struct iw_cm_id *cm_id)
182{
183 struct iwcm_id_private *cm_id_priv;
184 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
185 iwcm_deref_id(cm_id_priv);
186}
187
188static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
189
190struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
191 iw_cm_handler cm_handler,
192 void *context)
193{
194 struct iwcm_id_private *cm_id_priv;
195
196 cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
197 if (!cm_id_priv)
198 return ERR_PTR(-ENOMEM);
199
200 cm_id_priv->state = IW_CM_STATE_IDLE;
201 cm_id_priv->id.device = device;
202 cm_id_priv->id.cm_handler = cm_handler;
203 cm_id_priv->id.context = context;
204 cm_id_priv->id.event_handler = cm_event_handler;
205 cm_id_priv->id.add_ref = add_ref;
206 cm_id_priv->id.rem_ref = rem_ref;
207 spin_lock_init(&cm_id_priv->lock);
208 atomic_set(&cm_id_priv->refcount, 1);
209 init_waitqueue_head(&cm_id_priv->connect_wait);
210 init_completion(&cm_id_priv->destroy_comp);
211 INIT_LIST_HEAD(&cm_id_priv->work_list);
212 INIT_LIST_HEAD(&cm_id_priv->work_free_list);
213
214 return &cm_id_priv->id;
215}
216EXPORT_SYMBOL(iw_create_cm_id);
217
218
219static int iwcm_modify_qp_err(struct ib_qp *qp)
220{
221 struct ib_qp_attr qp_attr;
222
223 if (!qp)
224 return -EINVAL;
225
226 qp_attr.qp_state = IB_QPS_ERR;
227 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
228}
229
230/*
231 * This is really the RDMAC CLOSING state. It is most similar to the
232 * IB SQD QP state.
233 */
234static int iwcm_modify_qp_sqd(struct ib_qp *qp)
235{
236 struct ib_qp_attr qp_attr;
237
238 BUG_ON(qp == NULL);
239 qp_attr.qp_state = IB_QPS_SQD;
240 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
241}
242
243/*
244 * CM_ID <-- CLOSING
245 *
 246 * Block if a passive or active connection is currently being processed. Then
247 * process the event as follows:
248 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
249 * based on the abrupt flag
250 * - If the connection is already in the CLOSING or IDLE state, the peer is
251 * disconnecting concurrently with us and we've already seen the
252 * DISCONNECT event -- ignore the request and return 0
253 * - Disconnect on a listening endpoint returns -EINVAL
254 */
255int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
256{
257 struct iwcm_id_private *cm_id_priv;
258 unsigned long flags;
259 int ret = 0;
260 struct ib_qp *qp = NULL;
261
262 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
263 /* Wait if we're currently in a connect or accept downcall */
264 wait_event(cm_id_priv->connect_wait,
265 !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
266
267 spin_lock_irqsave(&cm_id_priv->lock, flags);
268 switch (cm_id_priv->state) {
269 case IW_CM_STATE_ESTABLISHED:
270 cm_id_priv->state = IW_CM_STATE_CLOSING;
271
 272		/* QP could be NULL for a user-mode client */
273 if (cm_id_priv->qp)
274 qp = cm_id_priv->qp;
275 else
276 ret = -EINVAL;
277 break;
278 case IW_CM_STATE_LISTEN:
279 ret = -EINVAL;
280 break;
281 case IW_CM_STATE_CLOSING:
282 /* remote peer closed first */
283 case IW_CM_STATE_IDLE:
284 /* accept or connect returned !0 */
285 break;
286 case IW_CM_STATE_CONN_RECV:
287 /*
288 * App called disconnect before/without calling accept after
 289		 * the connect_request event was delivered.
290 */
291 break;
292 case IW_CM_STATE_CONN_SENT:
293 /* Can only get here if wait above fails */
294 default:
295 BUG();
296 }
297 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
298
299 if (qp) {
300 if (abrupt)
301 ret = iwcm_modify_qp_err(qp);
302 else
303 ret = iwcm_modify_qp_sqd(qp);
304
305 /*
306 * If both sides are disconnecting the QP could
307 * already be in ERR or SQD states
308 */
309 ret = 0;
310 }
311
312 return ret;
313}
314EXPORT_SYMBOL(iw_cm_disconnect);
315
316/*
317 * CM_ID <-- DESTROYING
318 *
319 * Clean up all resources associated with the connection and release
320 * the initial reference taken by iw_create_cm_id.
321 */
322static void destroy_cm_id(struct iw_cm_id *cm_id)
323{
324 struct iwcm_id_private *cm_id_priv;
325 unsigned long flags;
326 int ret;
327
328 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
329 /*
330 * Wait if we're currently in a connect or accept downcall. A
331 * listening endpoint should never block here.
332 */
333 wait_event(cm_id_priv->connect_wait,
334 !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
335
336 spin_lock_irqsave(&cm_id_priv->lock, flags);
337 switch (cm_id_priv->state) {
338 case IW_CM_STATE_LISTEN:
339 cm_id_priv->state = IW_CM_STATE_DESTROYING;
340 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
341 /* destroy the listening endpoint */
342 ret = cm_id->device->iwcm->destroy_listen(cm_id);
343 spin_lock_irqsave(&cm_id_priv->lock, flags);
344 break;
345 case IW_CM_STATE_ESTABLISHED:
346 cm_id_priv->state = IW_CM_STATE_DESTROYING;
347 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
348 /* Abrupt close of the connection */
349 (void)iwcm_modify_qp_err(cm_id_priv->qp);
350 spin_lock_irqsave(&cm_id_priv->lock, flags);
351 break;
352 case IW_CM_STATE_IDLE:
353 case IW_CM_STATE_CLOSING:
354 cm_id_priv->state = IW_CM_STATE_DESTROYING;
355 break;
356 case IW_CM_STATE_CONN_RECV:
357 /*
358 * App called destroy before/without calling accept after
359 * receiving connection request event notification.
360 */
361 cm_id_priv->state = IW_CM_STATE_DESTROYING;
362 break;
363 case IW_CM_STATE_CONN_SENT:
364 case IW_CM_STATE_DESTROYING:
365 default:
366 BUG();
367 break;
368 }
369 if (cm_id_priv->qp) {
370 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
371 cm_id_priv->qp = NULL;
372 }
373 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
374
375 (void)iwcm_deref_id(cm_id_priv);
376}
377
378/*
379 * This function is only called by the application thread and cannot
380 * be called by the event thread. The function will wait for all
381 * references to be released on the cm_id and then kfree the cm_id
382 * object.
383 */
384void iw_destroy_cm_id(struct iw_cm_id *cm_id)
385{
386 struct iwcm_id_private *cm_id_priv;
387
388 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
389 BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
390
391 destroy_cm_id(cm_id);
392
393 wait_for_completion(&cm_id_priv->destroy_comp);
394
395 dealloc_work_entries(cm_id_priv);
396
397 kfree(cm_id_priv);
398}
399EXPORT_SYMBOL(iw_destroy_cm_id);
400
401/*
402 * CM_ID <-- LISTEN
403 *
404 * Start listening for connect requests. Generates one CONNECT_REQUEST
405 * event for each inbound connect request.
406 */
407int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
408{
409 struct iwcm_id_private *cm_id_priv;
410 unsigned long flags;
411 int ret = 0;
412
413 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
414
415 ret = alloc_work_entries(cm_id_priv, backlog);
416 if (ret)
417 return ret;
418
419 spin_lock_irqsave(&cm_id_priv->lock, flags);
420 switch (cm_id_priv->state) {
421 case IW_CM_STATE_IDLE:
422 cm_id_priv->state = IW_CM_STATE_LISTEN;
423 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
424 ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
425 if (ret)
426 cm_id_priv->state = IW_CM_STATE_IDLE;
427 spin_lock_irqsave(&cm_id_priv->lock, flags);
428 break;
429 default:
430 ret = -EINVAL;
431 }
432 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
433
434 return ret;
435}
436EXPORT_SYMBOL(iw_cm_listen);
437
438/*
439 * CM_ID <-- IDLE
440 *
441 * Rejects an inbound connection request. No events are generated.
442 */
443int iw_cm_reject(struct iw_cm_id *cm_id,
444 const void *private_data,
445 u8 private_data_len)
446{
447 struct iwcm_id_private *cm_id_priv;
448 unsigned long flags;
449 int ret;
450
451 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
452 set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
453
454 spin_lock_irqsave(&cm_id_priv->lock, flags);
455 if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
456 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
457 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
458 wake_up_all(&cm_id_priv->connect_wait);
459 return -EINVAL;
460 }
461 cm_id_priv->state = IW_CM_STATE_IDLE;
462 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
463
464 ret = cm_id->device->iwcm->reject(cm_id, private_data,
465 private_data_len);
466
467 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
468 wake_up_all(&cm_id_priv->connect_wait);
469
470 return ret;
471}
472EXPORT_SYMBOL(iw_cm_reject);
473
474/*
475 * CM_ID <-- ESTABLISHED
476 *
477 * Accepts an inbound connection request and generates an ESTABLISHED
478 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
479 * until the ESTABLISHED event is received from the provider.
480 */
481int iw_cm_accept(struct iw_cm_id *cm_id,
482 struct iw_cm_conn_param *iw_param)
483{
484 struct iwcm_id_private *cm_id_priv;
485 struct ib_qp *qp;
486 unsigned long flags;
487 int ret;
488
489 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
490 set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
491
492 spin_lock_irqsave(&cm_id_priv->lock, flags);
493 if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
494 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
495 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
496 wake_up_all(&cm_id_priv->connect_wait);
497 return -EINVAL;
498 }
499 /* Get the ib_qp given the QPN */
500 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
501 if (!qp) {
502 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
503 return -EINVAL;
504 }
505 cm_id->device->iwcm->add_ref(qp);
506 cm_id_priv->qp = qp;
507 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
508
509 ret = cm_id->device->iwcm->accept(cm_id, iw_param);
510 if (ret) {
511 /* An error on accept precludes provider events */
512 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
513 cm_id_priv->state = IW_CM_STATE_IDLE;
514 spin_lock_irqsave(&cm_id_priv->lock, flags);
515 if (cm_id_priv->qp) {
516 cm_id->device->iwcm->rem_ref(qp);
517 cm_id_priv->qp = NULL;
518 }
519 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
520 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
521 wake_up_all(&cm_id_priv->connect_wait);
522 }
523
524 return ret;
525}
526EXPORT_SYMBOL(iw_cm_accept);
527
528/*
529 * Active Side: CM_ID <-- CONN_SENT
530 *
531 * If successful, results in the generation of a CONNECT_REPLY
 532 * event. iw_cm_disconnect and iw_destroy_cm_id will block until the
533 * CONNECT_REPLY event is received from the provider.
534 */
535int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
536{
537 struct iwcm_id_private *cm_id_priv;
538 int ret = 0;
539 unsigned long flags;
540 struct ib_qp *qp;
541
542 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
543
544 ret = alloc_work_entries(cm_id_priv, 4);
545 if (ret)
546 return ret;
547
548 set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
549 spin_lock_irqsave(&cm_id_priv->lock, flags);
550
551 if (cm_id_priv->state != IW_CM_STATE_IDLE) {
552 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
553 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
554 wake_up_all(&cm_id_priv->connect_wait);
555 return -EINVAL;
556 }
557
558 /* Get the ib_qp given the QPN */
559 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
560 if (!qp) {
561 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
562 return -EINVAL;
563 }
564 cm_id->device->iwcm->add_ref(qp);
565 cm_id_priv->qp = qp;
566 cm_id_priv->state = IW_CM_STATE_CONN_SENT;
567 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
568
569 ret = cm_id->device->iwcm->connect(cm_id, iw_param);
570 if (ret) {
571 spin_lock_irqsave(&cm_id_priv->lock, flags);
572 if (cm_id_priv->qp) {
573 cm_id->device->iwcm->rem_ref(qp);
574 cm_id_priv->qp = NULL;
575 }
576 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
577 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
578 cm_id_priv->state = IW_CM_STATE_IDLE;
579 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
580 wake_up_all(&cm_id_priv->connect_wait);
581 }
582
583 return ret;
584}
585EXPORT_SYMBOL(iw_cm_connect);
586
587/*
588 * Passive Side: new CM_ID <-- CONN_RECV
589 *
590 * Handles an inbound connect request. The function creates a new
591 * iw_cm_id to represent the new connection and inherits the client
592 * callback function and other attributes from the listening parent.
593 *
594 * The work item contains a pointer to the listen_cm_id and the event. The
595 * listen_cm_id contains the client cm_handler, context and
596 * device. These are copied when the device is cloned. The event
 597 * device. These are copied when the cm_id is cloned. The event
598 *
599 * An error on the child should not affect the parent, so this
600 * function does not return a value.
601 */
602static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
603 struct iw_cm_event *iw_event)
604{
605 unsigned long flags;
606 struct iw_cm_id *cm_id;
607 struct iwcm_id_private *cm_id_priv;
608 int ret;
609
610 /*
611 * The provider should never generate a connection request
612 * event with a bad status.
613 */
614 BUG_ON(iw_event->status);
615
616 /*
617 * We could be destroying the listening id. If so, ignore this
618 * upcall.
619 */
620 spin_lock_irqsave(&listen_id_priv->lock, flags);
621 if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
622 spin_unlock_irqrestore(&listen_id_priv->lock, flags);
623 return;
624 }
625 spin_unlock_irqrestore(&listen_id_priv->lock, flags);
626
627 cm_id = iw_create_cm_id(listen_id_priv->id.device,
628 listen_id_priv->id.cm_handler,
629 listen_id_priv->id.context);
630 /* If the cm_id could not be created, ignore the request */
631 if (IS_ERR(cm_id))
632 return;
633
634 cm_id->provider_data = iw_event->provider_data;
635 cm_id->local_addr = iw_event->local_addr;
636 cm_id->remote_addr = iw_event->remote_addr;
637
638 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
639 cm_id_priv->state = IW_CM_STATE_CONN_RECV;
640
641 ret = alloc_work_entries(cm_id_priv, 3);
642 if (ret) {
643 iw_cm_reject(cm_id, NULL, 0);
644 iw_destroy_cm_id(cm_id);
645 return;
646 }
647
648 /* Call the client CM handler */
649 ret = cm_id->cm_handler(cm_id, iw_event);
650 if (ret) {
651 set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
652 destroy_cm_id(cm_id);
653 if (atomic_read(&cm_id_priv->refcount)==0)
654 kfree(cm_id);
655 }
656
657 if (iw_event->private_data_len)
658 kfree(iw_event->private_data);
659}
660
661/*
662 * Passive Side: CM_ID <-- ESTABLISHED
663 *
664 * The provider generated an ESTABLISHED event which means that
 665 * the MPA negotiation has completed successfully and we are now in MPA
666 * FPDU mode.
667 *
668 * This event can only be received in the CONN_RECV state. If the
669 * remote peer closed, the ESTABLISHED event would be received followed
670 * by the CLOSE event. If the app closes, it will block until we wake
671 * it up after processing this event.
672 */
673static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
674 struct iw_cm_event *iw_event)
675{
676 unsigned long flags;
677 int ret = 0;
678
679 spin_lock_irqsave(&cm_id_priv->lock, flags);
680
681 /*
682 * We clear the CONNECT_WAIT bit here to allow the callback
683 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
684 * from a callback handler is not allowed.
685 */
686 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
687 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
688 cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
689 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
690 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
691 wake_up_all(&cm_id_priv->connect_wait);
692
693 return ret;
694}
695
696/*
697 * Active Side: CM_ID <-- ESTABLISHED
698 *
699 * The app has called connect and is waiting for the established event to
 700 * post its requests to the server. This event will wake up anyone
 701 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
702 */
703static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
704 struct iw_cm_event *iw_event)
705{
706 unsigned long flags;
707 int ret = 0;
708
709 spin_lock_irqsave(&cm_id_priv->lock, flags);
710 /*
711 * Clear the connect wait bit so a callback function calling
712 * iw_cm_disconnect will not wait and deadlock this thread
713 */
714 clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
715 BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
716 if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
717 cm_id_priv->id.local_addr = iw_event->local_addr;
718 cm_id_priv->id.remote_addr = iw_event->remote_addr;
719 cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
720 } else {
721 /* REJECTED or RESET */
722 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
723 cm_id_priv->qp = NULL;
724 cm_id_priv->state = IW_CM_STATE_IDLE;
725 }
726 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
727 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
728
729 if (iw_event->private_data_len)
730 kfree(iw_event->private_data);
731
732 /* Wake up waiters on connect complete */
733 wake_up_all(&cm_id_priv->connect_wait);
734
735 return ret;
736}
737
738/*
739 * CM_ID <-- CLOSING
740 *
741 * If in the ESTABLISHED state, move to CLOSING.
742 */
743static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
744 struct iw_cm_event *iw_event)
745{
746 unsigned long flags;
747
748 spin_lock_irqsave(&cm_id_priv->lock, flags);
749 if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
750 cm_id_priv->state = IW_CM_STATE_CLOSING;
751 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
752}
753
754/*
755 * CM_ID <-- IDLE
756 *
 757 * If in the ESTABLISHED or CLOSING states, the QP will have been
758 * moved by the provider to the ERR state. Disassociate the CM_ID from
759 * the QP, move to IDLE, and remove the 'connected' reference.
760 *
761 * If in some other state, the cm_id was destroyed asynchronously.
762 * This is the last reference that will result in waking up
763 * the app thread blocked in iw_destroy_cm_id.
764 */
765static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
766 struct iw_cm_event *iw_event)
767{
768 unsigned long flags;
769 int ret = 0;
770 spin_lock_irqsave(&cm_id_priv->lock, flags);
771
772 if (cm_id_priv->qp) {
773 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
774 cm_id_priv->qp = NULL;
775 }
776 switch (cm_id_priv->state) {
777 case IW_CM_STATE_ESTABLISHED:
778 case IW_CM_STATE_CLOSING:
779 cm_id_priv->state = IW_CM_STATE_IDLE;
780 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
781 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
782 spin_lock_irqsave(&cm_id_priv->lock, flags);
783 break;
784 case IW_CM_STATE_DESTROYING:
785 break;
786 default:
787 BUG();
788 }
789 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
790
791 return ret;
792}
793
794static int process_event(struct iwcm_id_private *cm_id_priv,
795 struct iw_cm_event *iw_event)
796{
797 int ret = 0;
798
799 switch (iw_event->event) {
800 case IW_CM_EVENT_CONNECT_REQUEST:
801 cm_conn_req_handler(cm_id_priv, iw_event);
802 break;
803 case IW_CM_EVENT_CONNECT_REPLY:
804 ret = cm_conn_rep_handler(cm_id_priv, iw_event);
805 break;
806 case IW_CM_EVENT_ESTABLISHED:
807 ret = cm_conn_est_handler(cm_id_priv, iw_event);
808 break;
809 case IW_CM_EVENT_DISCONNECT:
810 cm_disconnect_handler(cm_id_priv, iw_event);
811 break;
812 case IW_CM_EVENT_CLOSE:
813 ret = cm_close_handler(cm_id_priv, iw_event);
814 break;
815 default:
816 BUG();
817 }
818
819 return ret;
820}
821
822/*
823 * Process events on the work_list for the cm_id. If the callback
824 * function requests that the cm_id be deleted, a flag is set in the
825 * cm_id flags to indicate that when the last reference is
826 * removed, the cm_id is to be destroyed. This is necessary to
827 * distinguish between an object that will be destroyed by the app
828 * thread asleep on the destroy_comp list vs. an object destroyed
829 * here synchronously when the last reference is removed.
830 */
831static void cm_work_handler(void *arg)
832{
833 struct iwcm_work *work = arg, lwork;
834 struct iwcm_id_private *cm_id_priv = work->cm_id;
835 unsigned long flags;
836 int empty;
837 int ret = 0;
838
839 spin_lock_irqsave(&cm_id_priv->lock, flags);
840 empty = list_empty(&cm_id_priv->work_list);
841 while (!empty) {
842 work = list_entry(cm_id_priv->work_list.next,
843 struct iwcm_work, list);
844 list_del_init(&work->list);
845 empty = list_empty(&cm_id_priv->work_list);
846 lwork = *work;
847 put_work(work);
848 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
849
850 ret = process_event(cm_id_priv, &work->event);
851 if (ret) {
852 set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
853 destroy_cm_id(&cm_id_priv->id);
854 }
855 BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
856 if (iwcm_deref_id(cm_id_priv))
857 return;
858
859 if (atomic_read(&cm_id_priv->refcount)==0 &&
860 test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
861 dealloc_work_entries(cm_id_priv);
862 kfree(cm_id_priv);
863 return;
864 }
865 spin_lock_irqsave(&cm_id_priv->lock, flags);
866 }
867 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
868}
869
870/*
 871 * This function is called in interrupt context. Schedule events on
872 * the iwcm_wq thread to allow callback functions to downcall into
873 * the CM and/or block. Events are queued to a per-CM_ID
874 * work_list. If this is the first event on the work_list, the work
875 * element is also queued on the iwcm_wq thread.
876 *
877 * Each event holds a reference on the cm_id. Until the last posted
878 * event has been delivered and processed, the cm_id cannot be
879 * deleted.
880 *
881 * Returns:
882 * 0 - the event was handled.
883 * -ENOMEM - the event was not handled due to lack of resources.
884 */
885static int cm_event_handler(struct iw_cm_id *cm_id,
886 struct iw_cm_event *iw_event)
887{
888 struct iwcm_work *work;
889 struct iwcm_id_private *cm_id_priv;
890 unsigned long flags;
891 int ret = 0;
892
893 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
894
895 spin_lock_irqsave(&cm_id_priv->lock, flags);
896 work = get_work(cm_id_priv);
897 if (!work) {
898 ret = -ENOMEM;
899 goto out;
900 }
901
902 INIT_WORK(&work->work, cm_work_handler, work);
903 work->cm_id = cm_id_priv;
904 work->event = *iw_event;
905
906 if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
907 work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
908 work->event.private_data_len) {
909 ret = copy_private_data(cm_id_priv, &work->event);
910 if (ret) {
911 put_work(work);
912 goto out;
913 }
914 }
915
916 atomic_inc(&cm_id_priv->refcount);
917 if (list_empty(&cm_id_priv->work_list)) {
918 list_add_tail(&work->list, &cm_id_priv->work_list);
919 queue_work(iwcm_wq, &work->work);
920 } else
921 list_add_tail(&work->list, &cm_id_priv->work_list);
922out:
923 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
924 return ret;
925}
926
927static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
928 struct ib_qp_attr *qp_attr,
929 int *qp_attr_mask)
930{
931 unsigned long flags;
932 int ret;
933
934 spin_lock_irqsave(&cm_id_priv->lock, flags);
935 switch (cm_id_priv->state) {
936 case IW_CM_STATE_IDLE:
937 case IW_CM_STATE_CONN_SENT:
938 case IW_CM_STATE_CONN_RECV:
939 case IW_CM_STATE_ESTABLISHED:
940 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
941 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
942 IB_ACCESS_REMOTE_WRITE|
943 IB_ACCESS_REMOTE_READ;
944 ret = 0;
945 break;
946 default:
947 ret = -EINVAL;
948 break;
949 }
950 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
951 return ret;
952}
953
954static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
955 struct ib_qp_attr *qp_attr,
956 int *qp_attr_mask)
957{
958 unsigned long flags;
959 int ret;
960
961 spin_lock_irqsave(&cm_id_priv->lock, flags);
962 switch (cm_id_priv->state) {
963 case IW_CM_STATE_IDLE:
964 case IW_CM_STATE_CONN_SENT:
965 case IW_CM_STATE_CONN_RECV:
966 case IW_CM_STATE_ESTABLISHED:
967 *qp_attr_mask = 0;
968 ret = 0;
969 break;
970 default:
971 ret = -EINVAL;
972 break;
973 }
974 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
975 return ret;
976}
977
978int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
979 struct ib_qp_attr *qp_attr,
980 int *qp_attr_mask)
981{
982 struct iwcm_id_private *cm_id_priv;
983 int ret;
984
985 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
986 switch (qp_attr->qp_state) {
987 case IB_QPS_INIT:
988 case IB_QPS_RTR:
989 ret = iwcm_init_qp_init_attr(cm_id_priv,
990 qp_attr, qp_attr_mask);
991 break;
992 case IB_QPS_RTS:
993 ret = iwcm_init_qp_rts_attr(cm_id_priv,
994 qp_attr, qp_attr_mask);
995 break;
996 default:
997 ret = -EINVAL;
998 break;
999 }
1000 return ret;
1001}
1002EXPORT_SYMBOL(iw_cm_init_qp_attr);
1003
1004static int __init iw_cm_init(void)
1005{
1006 iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
1007 if (!iwcm_wq)
1008 return -ENOMEM;
1009
1010 return 0;
1011}
1012
1013static void __exit iw_cm_cleanup(void)
1014{
1015 destroy_workqueue(iwcm_wq);
1016}
1017
1018module_init(iw_cm_init);
1019module_exit(iw_cm_cleanup);
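
The comments in iwcm.c above spell out the provider contract: events are delivered from interrupt context through cm_id->event_handler (bound to cm_event_handler()), and a -ENOMEM return on a connection request means the listen backlog is exhausted, leaving the provider to reject the request itself. Below is a hedged sketch of what such upcalls might look like in a hypothetical iWARP provider; the function and parameter names are illustrative, not taken from any real driver.

/* Hypothetical provider upcalls into the iw_cm core. */
#include <linux/string.h>
#include <linux/types.h>
#include <rdma/iw_cm.h>

static void example_report_established(struct iw_cm_id *cm_id)
{
        struct iw_cm_event event;

        memset(&event, 0, sizeof event);
        event.event = IW_CM_EVENT_ESTABLISHED;
        cm_id->event_handler(cm_id, &event);    /* queued to iwcm_wq */
}

static void example_report_conn_req(struct iw_cm_id *listen_id,
                                    struct sockaddr_in *laddr,
                                    struct sockaddr_in *raddr,
                                    void *pdata, u8 pdata_len)
{
        struct iw_cm_event event;

        /* Connection requests must carry a zero status (see the BUG_ON in
         * cm_conn_req_handler); memset takes care of that here. */
        memset(&event, 0, sizeof event);
        event.event = IW_CM_EVENT_CONNECT_REQUEST;
        event.local_addr = *laddr;
        event.remote_addr = *raddr;
        event.private_data = pdata;
        event.private_data_len = pdata_len;

        if (listen_id->event_handler(listen_id, &event)) {
                /* Backlog exceeded (-ENOMEM): reject from the provider's
                 * own path, e.g. by sending an MPA reject on the wire. */
        }
}
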
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
new file mode 100644
index 000000000000..3f6cc82564c8
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.h
@@ -0,0 +1,62 @@
1/*
2 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef IWCM_H
34#define IWCM_H
35
36enum iw_cm_state {
37 IW_CM_STATE_IDLE, /* unbound, inactive */
38 IW_CM_STATE_LISTEN, /* listen waiting for connect */
39 IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */
40 IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */
41 IW_CM_STATE_ESTABLISHED, /* established */
42 IW_CM_STATE_CLOSING, /* disconnect */
43 IW_CM_STATE_DESTROYING /* object being deleted */
44};
45
46struct iwcm_id_private {
47 struct iw_cm_id id;
48 enum iw_cm_state state;
49 unsigned long flags;
50 struct ib_qp *qp;
51 struct completion destroy_comp;
52 wait_queue_head_t connect_wait;
53 struct list_head work_list;
54 spinlock_t lock;
55 atomic_t refcount;
56 struct list_head work_free_list;
57};
58
59#define IWCM_F_CALLBACK_DESTROY 1
60#define IWCM_F_CONNECT_WAIT 2
61
62#endif /* IWCM_H */
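
IWCM_F_CALLBACK_DESTROY and IWCM_F_CONNECT_WAIT are bit numbers for the atomic bitops on iwcm_id_private.flags. The CONNECT_WAIT bit carries the handshake described in the iwcm.c comments: the connect/accept downcalls set it before calling into the provider, the CONNECT_REPLY and ESTABLISHED handlers clear it and wake connect_wait, and disconnect/destroy sleep until it is clear. Distilled from the code above, the pattern is roughly:

        /* downcall side (iw_cm_connect / iw_cm_accept) */
        set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        ret = cm_id->device->iwcm->connect(cm_id, iw_param);

        /* event side (cm_conn_rep_handler / cm_conn_est_handler) */
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);

        /* teardown side (iw_cm_disconnect / destroy_cm_id) */
        wait_event(cm_id_priv->connect_wait,
                   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
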
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1c3cfbbe6a97..082f03c158f0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1246 int i; 1246 int i;
1247 1247
1248 for (i = 0; i < MAX_MGMT_OUI; i++) 1248 for (i = 0; i < MAX_MGMT_OUI; i++)
1249 /* Is there matching OUI for this vendor class ? */ 1249 /* Is there matching OUI for this vendor class ? */
1250 if (!memcmp(vendor_class->oui[i], oui, 3)) 1250 if (!memcmp(vendor_class->oui[i], oui, 3))
1251 return i; 1251 return i;
1252 1252
1253 return -1; 1253 return -1;
@@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2237 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2237 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2238 &mad_agent_priv->send_list, agent_list) { 2238 &mad_agent_priv->send_list, agent_list) {
2239 if (mad_send_wr->status == IB_WC_SUCCESS) { 2239 if (mad_send_wr->status == IB_WC_SUCCESS) {
2240 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; 2240 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2241 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2241 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2242 } 2242 }
2243 } 2243 }
@@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2528 } 2528 }
2529 } 2529 }
2530 sg_list.addr = dma_map_single(qp_info->port_priv-> 2530 sg_list.addr = dma_map_single(qp_info->port_priv->
2531 device->dma_device, 2531 device->dma_device,
2532 &mad_priv->grh, 2532 &mad_priv->grh,
2533 sizeof *mad_priv - 2533 sizeof *mad_priv -
2534 sizeof mad_priv->header, 2534 sizeof mad_priv->header,
2535 DMA_FROM_DEVICE); 2535 DMA_FROM_DEVICE);
2536 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); 2536 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
2537 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2537 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
@@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2606 struct ib_qp *qp; 2606 struct ib_qp *qp;
2607 2607
2608 attr = kmalloc(sizeof *attr, GFP_KERNEL); 2608 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2609 if (!attr) { 2609 if (!attr) {
2610 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); 2610 printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
2611 return -ENOMEM; 2611 return -ENOMEM;
2612 } 2612 }
@@ -2876,7 +2876,10 @@ static void ib_mad_init_device(struct ib_device *device)
2876{ 2876{
2877 int start, end, i; 2877 int start, end, i;
2878 2878
2879 if (device->node_type == IB_NODE_SWITCH) { 2879 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
2880 return;
2881
2882 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2880 start = 0; 2883 start = 0;
2881 end = 0; 2884 end = 0;
2882 } else { 2885 } else {
@@ -2923,7 +2926,7 @@ static void ib_mad_remove_device(struct ib_device *device)
2923{ 2926{
2924 int i, num_ports, cur_port; 2927 int i, num_ports, cur_port;
2925 2928
2926 if (device->node_type == IB_NODE_SWITCH) { 2929 if (device->node_type == RDMA_NODE_IB_SWITCH) {
2927 num_ports = 1; 2930 num_ports = 1;
2928 cur_port = 0; 2931 cur_port = 0;
2929 } else { 2932 } else {
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d147f3bad2ce..d06b59083f6e 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -38,8 +38,8 @@
38#define __IB_MAD_PRIV_H__ 38#define __IB_MAD_PRIV_H__
39 39
40#include <linux/completion.h> 40#include <linux/completion.h>
41#include <linux/err.h>
41#include <linux/pci.h> 42#include <linux/pci.h>
42#include <linux/kthread.h>
43#include <linux/workqueue.h> 43#include <linux/workqueue.h>
44#include <rdma/ib_mad.h> 44#include <rdma/ib_mad.h>
45#include <rdma/ib_smi.h> 45#include <rdma/ib_smi.h>
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index ebcd5b181770..1ef79d015a1e 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -33,8 +33,6 @@
33 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $ 33 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
34 */ 34 */
35 35
36#include <linux/dma-mapping.h>
37
38#include "mad_priv.h" 36#include "mad_priv.h"
39#include "mad_rmpp.h" 37#include "mad_rmpp.h"
40 38
@@ -60,6 +58,7 @@ struct mad_rmpp_recv {
60 int last_ack; 58 int last_ack;
61 int seg_num; 59 int seg_num;
62 int newwin; 60 int newwin;
61 int repwin;
63 62
64 __be64 tid; 63 __be64 tid;
65 u32 src_qp; 64 u32 src_qp;
@@ -170,6 +169,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
170 return msg; 169 return msg;
171} 170}
172 171
172static void ack_ds_ack(struct ib_mad_agent_private *agent,
173 struct ib_mad_recv_wc *recv_wc)
174{
175 struct ib_mad_send_buf *msg;
176 struct ib_rmpp_mad *rmpp_mad;
177 int ret;
178
179 msg = alloc_response_msg(&agent->agent, recv_wc);
180 if (IS_ERR(msg))
181 return;
182
183 rmpp_mad = msg->mad;
184 memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
185
186 rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
187 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
188 rmpp_mad->rmpp_hdr.seg_num = 0;
189 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);
190
191 ret = ib_post_send_mad(msg, NULL);
192 if (ret) {
193 ib_destroy_ah(msg->ah);
194 ib_free_send_mad(msg);
195 }
196}
197
173void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) 198void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
174{ 199{
175 struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad; 200 struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
@@ -271,6 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
271 rmpp_recv->newwin = 1; 296 rmpp_recv->newwin = 1;
272 rmpp_recv->seg_num = 1; 297 rmpp_recv->seg_num = 1;
273 rmpp_recv->last_ack = 0; 298 rmpp_recv->last_ack = 0;
299 rmpp_recv->repwin = 1;
274 300
275 mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; 301 mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
276 rmpp_recv->tid = mad_hdr->tid; 302 rmpp_recv->tid = mad_hdr->tid;
@@ -365,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent)
365static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, 391static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
366 int seg_num) 392 int seg_num)
367{ 393{
368 struct ib_mad_recv_buf *seg_buf; 394 struct ib_mad_recv_buf *seg_buf;
369 int cur_seg_num; 395 int cur_seg_num;
370 396
371 list_for_each_entry_reverse(seg_buf, rmpp_list, list) { 397 list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
@@ -591,6 +617,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
591 break; 617 break;
592} 618}
593 619
620static void process_ds_ack(struct ib_mad_agent_private *agent,
621 struct ib_mad_recv_wc *mad_recv_wc, int newwin)
622{
623 struct mad_rmpp_recv *rmpp_recv;
624
625 rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
626 if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
627 rmpp_recv->repwin = newwin;
628}
629
594static void process_rmpp_ack(struct ib_mad_agent_private *agent, 630static void process_rmpp_ack(struct ib_mad_agent_private *agent,
595 struct ib_mad_recv_wc *mad_recv_wc) 631 struct ib_mad_recv_wc *mad_recv_wc)
596{ 632{
@@ -616,8 +652,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
616 652
617 spin_lock_irqsave(&agent->lock, flags); 653 spin_lock_irqsave(&agent->lock, flags);
618 mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); 654 mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
619 if (!mad_send_wr) 655 if (!mad_send_wr) {
620 goto out; /* Unmatched ACK */ 656 if (!seg_num)
657 process_ds_ack(agent, mad_recv_wc, newwin);
658 goto out; /* Unmatched or DS RMPP ACK */
659 }
660
661 if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
662 (mad_send_wr->timeout)) {
663 spin_unlock_irqrestore(&agent->lock, flags);
664 ack_ds_ack(agent, mad_recv_wc);
665 return; /* Repeated ACK for DS RMPP transaction */
666 }
621 667
622 if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || 668 if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
623 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) 669 (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
@@ -656,6 +702,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
656 if (mad_send_wr->refcount == 1) 702 if (mad_send_wr->refcount == 1)
657 ib_reset_mad_timeout(mad_send_wr, 703 ib_reset_mad_timeout(mad_send_wr,
658 mad_send_wr->send_buf.timeout_ms); 704 mad_send_wr->send_buf.timeout_ms);
705 spin_unlock_irqrestore(&agent->lock, flags);
706 ack_ds_ack(agent, mad_recv_wc);
707 return;
659 } else if (mad_send_wr->refcount == 1 && 708 } else if (mad_send_wr->refcount == 1 &&
660 mad_send_wr->seg_num < mad_send_wr->newwin && 709 mad_send_wr->seg_num < mad_send_wr->newwin &&
661 mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { 710 mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
@@ -772,6 +821,39 @@ out:
772 return NULL; 821 return NULL;
773} 822}
774 823
824static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
825{
826 struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
827 struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
828 struct mad_rmpp_recv *rmpp_recv;
829 struct ib_ah_attr ah_attr;
830 unsigned long flags;
831 int newwin = 1;
832
833 if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
834 goto out;
835
836 spin_lock_irqsave(&agent->lock, flags);
837 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
838 if (rmpp_recv->tid != mad_hdr->tid ||
839 rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
840 rmpp_recv->class_version != mad_hdr->class_version ||
841 (rmpp_recv->method & IB_MGMT_METHOD_RESP))
842 continue;
843
844 if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
845 continue;
846
847 if (rmpp_recv->slid == ah_attr.dlid) {
848 newwin = rmpp_recv->repwin;
849 break;
850 }
851 }
852 spin_unlock_irqrestore(&agent->lock, flags);
853out:
854 return newwin;
855}
856
775int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) 857int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
776{ 858{
777 struct ib_rmpp_mad *rmpp_mad; 859 struct ib_rmpp_mad *rmpp_mad;
@@ -787,7 +869,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
787 return IB_RMPP_RESULT_INTERNAL; 869 return IB_RMPP_RESULT_INTERNAL;
788 } 870 }
789 871
790 mad_send_wr->newwin = 1; 872 mad_send_wr->newwin = init_newwin(mad_send_wr);
791 873
792 /* We need to wait for the final ACK even if there isn't a response */ 874 /* We need to wait for the final ACK even if there isn't a response */
793 mad_send_wr->refcount += (mad_send_wr->timeout == 0); 875 mad_send_wr->refcount += (mad_send_wr->timeout == 0);
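Note: taken together, the mad_rmpp.c hunks add dual-sided (DS) RMPP support. When an ACK arrives that matches no outstanding send and carries seg_num 0, process_ds_ack() records the window the requester advertised on the already-completed receive, and init_newwin() later seeds the response's initial send window from that value instead of the fixed 1, keyed by TID, management class/version and the peer's LID. A minimal sketch of the idea, assuming simplified stand-in types (rmpp_peer, record_response_window and initial_send_window are illustrative, not part of the patch):

#include <linux/types.h>

struct rmpp_peer {			/* stand-in for struct mad_rmpp_recv */
	u64 tid;			/* transaction the receive belongs to */
	u16 slid;			/* LID the request arrived from */
	int repwin;			/* window advertised in the requester's ACKs */
};

/* Unmatched ACK with seg_num == 0: remember the advertised window for the
 * response direction - cf. process_ds_ack() above. */
static void record_response_window(struct rmpp_peer *peer, int newwin)
{
	peer->repwin = newwin;
}

/* When the response is sent, start from the remembered window rather than 1,
 * falling back to 1 if no matching receive exists - cf. init_newwin() above. */
static int initial_send_window(const struct rmpp_peer *peer, u64 tid, u16 dlid)
{
	if (peer && peer->tid == tid && peer->slid == dlid)
		return peer->repwin;
	return 1;
}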
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index d6b84226bba7..1706d3c7e95e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
4 * Copyright (c) 2006 Intel Corporation. All rights reserved.
4 * 5 *
5 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
@@ -75,6 +76,7 @@ struct ib_sa_device {
75struct ib_sa_query { 76struct ib_sa_query {
76 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); 77 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
77 void (*release)(struct ib_sa_query *); 78 void (*release)(struct ib_sa_query *);
79 struct ib_sa_client *client;
78 struct ib_sa_port *port; 80 struct ib_sa_port *port;
79 struct ib_mad_send_buf *mad_buf; 81 struct ib_mad_send_buf *mad_buf;
80 struct ib_sa_sm_ah *sm_ah; 82 struct ib_sa_sm_ah *sm_ah;
@@ -415,6 +417,31 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
415 } 417 }
416} 418}
417 419
420void ib_sa_register_client(struct ib_sa_client *client)
421{
422 atomic_set(&client->users, 1);
423 init_completion(&client->comp);
424}
425EXPORT_SYMBOL(ib_sa_register_client);
426
427static inline void ib_sa_client_get(struct ib_sa_client *client)
428{
429 atomic_inc(&client->users);
430}
431
432static inline void ib_sa_client_put(struct ib_sa_client *client)
433{
434 if (atomic_dec_and_test(&client->users))
435 complete(&client->comp);
436}
437
438void ib_sa_unregister_client(struct ib_sa_client *client)
439{
440 ib_sa_client_put(client);
441 wait_for_completion(&client->comp);
442}
443EXPORT_SYMBOL(ib_sa_unregister_client);
444
418/** 445/**
419 * ib_sa_cancel_query - try to cancel an SA query 446 * ib_sa_cancel_query - try to cancel an SA query
420 * @id:ID of query to cancel 447 * @id:ID of query to cancel
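Note: the new ib_sa_client gives each SA consumer its own reference count. ib_sa_register_client() starts it at one, every query issued against the client takes a reference (dropped in send_handler() when the MAD completes), and ib_sa_unregister_client() drops the initial reference and then waits on the completion until every outstanding query has released its own. A minimal usage sketch, assuming the declarations are exported through <rdma/ib_sa.h>; the my_sa_client name and the module wrapper are illustrative only:

#include <linux/module.h>
#include <rdma/ib_sa.h>

static struct ib_sa_client my_sa_client;	/* illustrative consumer */

static int __init my_consumer_init(void)
{
	ib_sa_register_client(&my_sa_client);	/* users = 1, completion set up */
	/*
	 * SA queries now take the client as their first argument, e.g.
	 * ib_sa_path_rec_get(&my_sa_client, device, port, ...), and each
	 * in-flight query holds a reference on it until it completes.
	 */
	return 0;
}

static void __exit my_consumer_exit(void)
{
	/* Drops the initial reference, then blocks until all queries issued
	 * on this client have completed and released theirs. */
	ib_sa_unregister_client(&my_sa_client);
}

module_init(my_consumer_init);
module_exit(my_consumer_exit);
MODULE_LICENSE("GPL");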
@@ -557,6 +584,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
557 584
558/** 585/**
559 * ib_sa_path_rec_get - Start a Path get query 586 * ib_sa_path_rec_get - Start a Path get query
587 * @client:SA client
560 * @device:device to send query on 588 * @device:device to send query on
561 * @port_num: port number to send query on 589 * @port_num: port number to send query on
562 * @rec:Path Record to send in query 590 * @rec:Path Record to send in query
@@ -579,7 +607,8 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
579 * error code. Otherwise it is a query ID that can be used to cancel 607 * error code. Otherwise it is a query ID that can be used to cancel
580 * the query. 608 * the query.
581 */ 609 */
582int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 610int ib_sa_path_rec_get(struct ib_sa_client *client,
611 struct ib_device *device, u8 port_num,
583 struct ib_sa_path_rec *rec, 612 struct ib_sa_path_rec *rec,
584 ib_sa_comp_mask comp_mask, 613 ib_sa_comp_mask comp_mask,
585 int timeout_ms, gfp_t gfp_mask, 614 int timeout_ms, gfp_t gfp_mask,
@@ -614,8 +643,10 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
614 goto err1; 643 goto err1;
615 } 644 }
616 645
617 query->callback = callback; 646 ib_sa_client_get(client);
618 query->context = context; 647 query->sa_query.client = client;
648 query->callback = callback;
649 query->context = context;
619 650
620 mad = query->sa_query.mad_buf->mad; 651 mad = query->sa_query.mad_buf->mad;
621 init_mad(mad, agent); 652 init_mad(mad, agent);
@@ -639,6 +670,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
639 670
640err2: 671err2:
641 *sa_query = NULL; 672 *sa_query = NULL;
673 ib_sa_client_put(query->sa_query.client);
642 ib_free_send_mad(query->sa_query.mad_buf); 674 ib_free_send_mad(query->sa_query.mad_buf);
643 675
644err1: 676err1:
@@ -671,6 +703,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
671 703
672/** 704/**
673 * ib_sa_service_rec_query - Start Service Record operation 705 * ib_sa_service_rec_query - Start Service Record operation
706 * @client:SA client
674 * @device:device to send request on 707 * @device:device to send request on
675 * @port_num: port number to send request on 708 * @port_num: port number to send request on
676 * @method:SA method - should be get, set, or delete 709 * @method:SA method - should be get, set, or delete
@@ -695,7 +728,8 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
695 * error code. Otherwise it is a request ID that can be used to cancel 728 * error code. Otherwise it is a request ID that can be used to cancel
696 * the query. 729 * the query.
697 */ 730 */
698int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, 731int ib_sa_service_rec_query(struct ib_sa_client *client,
732 struct ib_device *device, u8 port_num, u8 method,
699 struct ib_sa_service_rec *rec, 733 struct ib_sa_service_rec *rec,
700 ib_sa_comp_mask comp_mask, 734 ib_sa_comp_mask comp_mask,
701 int timeout_ms, gfp_t gfp_mask, 735 int timeout_ms, gfp_t gfp_mask,
@@ -735,8 +769,10 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
735 goto err1; 769 goto err1;
736 } 770 }
737 771
738 query->callback = callback; 772 ib_sa_client_get(client);
739 query->context = context; 773 query->sa_query.client = client;
774 query->callback = callback;
775 query->context = context;
740 776
741 mad = query->sa_query.mad_buf->mad; 777 mad = query->sa_query.mad_buf->mad;
742 init_mad(mad, agent); 778 init_mad(mad, agent);
@@ -761,6 +797,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
761 797
762err2: 798err2:
763 *sa_query = NULL; 799 *sa_query = NULL;
800 ib_sa_client_put(query->sa_query.client);
764 ib_free_send_mad(query->sa_query.mad_buf); 801 ib_free_send_mad(query->sa_query.mad_buf);
765 802
766err1: 803err1:
@@ -791,7 +828,8 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
791 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); 828 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
792} 829}
793 830
794int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, 831int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
832 struct ib_device *device, u8 port_num,
795 u8 method, 833 u8 method,
796 struct ib_sa_mcmember_rec *rec, 834 struct ib_sa_mcmember_rec *rec,
797 ib_sa_comp_mask comp_mask, 835 ib_sa_comp_mask comp_mask,
@@ -827,8 +865,10 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
827 goto err1; 865 goto err1;
828 } 866 }
829 867
830 query->callback = callback; 868 ib_sa_client_get(client);
831 query->context = context; 869 query->sa_query.client = client;
870 query->callback = callback;
871 query->context = context;
832 872
833 mad = query->sa_query.mad_buf->mad; 873 mad = query->sa_query.mad_buf->mad;
834 init_mad(mad, agent); 874 init_mad(mad, agent);
@@ -853,6 +893,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
853 893
854err2: 894err2:
855 *sa_query = NULL; 895 *sa_query = NULL;
896 ib_sa_client_put(query->sa_query.client);
856 ib_free_send_mad(query->sa_query.mad_buf); 897 ib_free_send_mad(query->sa_query.mad_buf);
857 898
858err1: 899err1:
@@ -887,8 +928,9 @@ static void send_handler(struct ib_mad_agent *agent,
887 idr_remove(&query_idr, query->id); 928 idr_remove(&query_idr, query->id);
888 spin_unlock_irqrestore(&idr_lock, flags); 929 spin_unlock_irqrestore(&idr_lock, flags);
889 930
890 ib_free_send_mad(mad_send_wc->send_buf); 931 ib_free_send_mad(mad_send_wc->send_buf);
891 kref_put(&query->sm_ah->ref, free_sm_ah); 932 kref_put(&query->sm_ah->ref, free_sm_ah);
933 ib_sa_client_put(query->client);
892 query->release(query); 934 query->release(query);
893} 935}
894 936
@@ -919,7 +961,10 @@ static void ib_sa_add_one(struct ib_device *device)
919 struct ib_sa_device *sa_dev; 961 struct ib_sa_device *sa_dev;
920 int s, e, i; 962 int s, e, i;
921 963
922 if (device->node_type == IB_NODE_SWITCH) 964 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
965 return;
966
967 if (device->node_type == RDMA_NODE_IB_SWITCH)
923 s = e = 0; 968 s = e = 0;
924 else { 969 else {
925 s = 1; 970 s = 1;
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 35852e794e26..54b81e17ad50 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -64,7 +64,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
64 64
65 /* C14-9:2 */ 65 /* C14-9:2 */
66 if (hop_ptr && hop_ptr < hop_cnt) { 66 if (hop_ptr && hop_ptr < hop_cnt) {
67 if (node_type != IB_NODE_SWITCH) 67 if (node_type != RDMA_NODE_IB_SWITCH)
68 return 0; 68 return 0;
69 69
70 /* smp->return_path set when received */ 70 /* smp->return_path set when received */
@@ -77,7 +77,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
77 if (hop_ptr == hop_cnt) { 77 if (hop_ptr == hop_cnt) {
78 /* smp->return_path set when received */ 78 /* smp->return_path set when received */
79 smp->hop_ptr++; 79 smp->hop_ptr++;
80 return (node_type == IB_NODE_SWITCH || 80 return (node_type == RDMA_NODE_IB_SWITCH ||
81 smp->dr_dlid == IB_LID_PERMISSIVE); 81 smp->dr_dlid == IB_LID_PERMISSIVE);
82 } 82 }
83 83
@@ -95,7 +95,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
95 95
96 /* C14-13:2 */ 96 /* C14-13:2 */
97 if (2 <= hop_ptr && hop_ptr <= hop_cnt) { 97 if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
98 if (node_type != IB_NODE_SWITCH) 98 if (node_type != RDMA_NODE_IB_SWITCH)
99 return 0; 99 return 0;
100 100
101 smp->hop_ptr--; 101 smp->hop_ptr--;
@@ -107,7 +107,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
107 if (hop_ptr == 1) { 107 if (hop_ptr == 1) {
108 smp->hop_ptr--; 108 smp->hop_ptr--;
109 /* C14-13:3 -- SMPs destined for SM shouldn't be here */ 109 /* C14-13:3 -- SMPs destined for SM shouldn't be here */
110 return (node_type == IB_NODE_SWITCH || 110 return (node_type == RDMA_NODE_IB_SWITCH ||
111 smp->dr_slid == IB_LID_PERMISSIVE); 111 smp->dr_slid == IB_LID_PERMISSIVE);
112 } 112 }
113 113
@@ -142,7 +142,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
142 142
143 /* C14-9:2 -- intermediate hop */ 143 /* C14-9:2 -- intermediate hop */
144 if (hop_ptr && hop_ptr < hop_cnt) { 144 if (hop_ptr && hop_ptr < hop_cnt) {
145 if (node_type != IB_NODE_SWITCH) 145 if (node_type != RDMA_NODE_IB_SWITCH)
146 return 0; 146 return 0;
147 147
148 smp->return_path[hop_ptr] = port_num; 148 smp->return_path[hop_ptr] = port_num;
@@ -156,7 +156,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
156 smp->return_path[hop_ptr] = port_num; 156 smp->return_path[hop_ptr] = port_num;
157 /* smp->hop_ptr updated when sending */ 157 /* smp->hop_ptr updated when sending */
158 158
159 return (node_type == IB_NODE_SWITCH || 159 return (node_type == RDMA_NODE_IB_SWITCH ||
160 smp->dr_dlid == IB_LID_PERMISSIVE); 160 smp->dr_dlid == IB_LID_PERMISSIVE);
161 } 161 }
162 162
@@ -175,7 +175,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
175 175
176 /* C14-13:2 */ 176 /* C14-13:2 */
177 if (2 <= hop_ptr && hop_ptr <= hop_cnt) { 177 if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
178 if (node_type != IB_NODE_SWITCH) 178 if (node_type != RDMA_NODE_IB_SWITCH)
179 return 0; 179 return 0;
180 180
181 /* smp->hop_ptr updated when sending */ 181 /* smp->hop_ptr updated when sending */
@@ -190,7 +190,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
190 return 1; 190 return 1;
191 } 191 }
192 /* smp->hop_ptr updated when sending */ 192 /* smp->hop_ptr updated when sending */
193 return (node_type == IB_NODE_SWITCH); 193 return (node_type == RDMA_NODE_IB_SWITCH);
194 } 194 }
195 195
196 /* C14-13:4 -- hop_ptr = 0 -> give to SM */ 196 /* C14-13:4 -- hop_ptr = 0 -> give to SM */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 21f9282c1b25..709323c14c5d 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -68,7 +68,7 @@ struct port_table_attribute {
68 int index; 68 int index;
69}; 69};
70 70
71static inline int ibdev_is_alive(const struct ib_device *dev) 71static inline int ibdev_is_alive(const struct ib_device *dev)
72{ 72{
73 return dev->reg_state == IB_DEV_REGISTERED; 73 return dev->reg_state == IB_DEV_REGISTERED;
74} 74}
@@ -589,10 +589,11 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
589 return -ENODEV; 589 return -ENODEV;
590 590
591 switch (dev->node_type) { 591 switch (dev->node_type) {
592 case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type); 592 case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
593 case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); 593 case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
594 case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); 594 case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
595 default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); 595 case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
596 default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
596 } 597 }
597} 598}
598 599
@@ -708,7 +709,7 @@ int ib_device_register_sysfs(struct ib_device *device)
708 if (ret) 709 if (ret)
709 goto err_put; 710 goto err_put;
710 711
711 if (device->node_type == IB_NODE_SWITCH) { 712 if (device->node_type == RDMA_NODE_IB_SWITCH) {
712 ret = add_port(device, 0); 713 ret = add_port(device, 0);
713 if (ret) 714 if (ret)
714 goto err_put; 715 goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index c1c6fda9452c..ad4f4d5c2924 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
309 info = evt->param.apr_rcvd.apr_info; 309 info = evt->param.apr_rcvd.apr_info;
310 break; 310 break;
311 case IB_CM_SIDR_REQ_RECEIVED: 311 case IB_CM_SIDR_REQ_RECEIVED:
312 uvt->resp.u.sidr_req_resp.pkey = 312 uvt->resp.u.sidr_req_resp.pkey =
313 evt->param.sidr_req_rcvd.pkey; 313 evt->param.sidr_req_rcvd.pkey;
314 uvt->resp.u.sidr_req_resp.port = 314 uvt->resp.u.sidr_req_resp.port =
315 evt->param.sidr_req_rcvd.port; 315 evt->param.sidr_req_rcvd.port;
316 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; 316 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
317 break; 317 break;
@@ -1237,7 +1237,7 @@ static struct class ucm_class = {
1237static ssize_t show_ibdev(struct class_device *class_dev, char *buf) 1237static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
1238{ 1238{
1239 struct ib_ucm_device *dev; 1239 struct ib_ucm_device *dev;
1240 1240
1241 dev = container_of(class_dev, struct ib_ucm_device, class_dev); 1241 dev = container_of(class_dev, struct ib_ucm_device, class_dev);
1242 return sprintf(buf, "%s\n", dev->ib_dev->name); 1242 return sprintf(buf, "%s\n", dev->ib_dev->name);
1243} 1243}
@@ -1247,7 +1247,8 @@ static void ib_ucm_add_one(struct ib_device *device)
1247{ 1247{
1248 struct ib_ucm_device *ucm_dev; 1248 struct ib_ucm_device *ucm_dev;
1249 1249
1250 if (!device->alloc_ucontext) 1250 if (!device->alloc_ucontext ||
1251 rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1251 return; 1252 return;
1252 1253
1253 ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); 1254 ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 1273f8807e84..807fbd6b8414 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
@@ -1032,7 +1032,10 @@ static void ib_umad_add_one(struct ib_device *device)
1032 struct ib_umad_device *umad_dev; 1032 struct ib_umad_device *umad_dev;
1033 int s, e, i; 1033 int s, e, i;
1034 1034
1035 if (device->node_type == IB_NODE_SWITCH) 1035 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1036 return;
1037
1038 if (device->node_type == RDMA_NODE_IB_SWITCH)
1036 s = e = 0; 1039 s = e = 0;
1037 else { 1040 else {
1038 s = 1; 1041 s = 1;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 30923eb68ec7..b72c7f69ca90 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -155,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
155} 155}
156 156
157static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, 157static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
158 struct ib_ucontext *context) 158 struct ib_ucontext *context, int nested)
159{ 159{
160 struct ib_uobject *uobj; 160 struct ib_uobject *uobj;
161 161
@@ -163,7 +163,10 @@ static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
163 if (!uobj) 163 if (!uobj)
164 return NULL; 164 return NULL;
165 165
166 down_read(&uobj->mutex); 166 if (nested)
167 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
168 else
169 down_read(&uobj->mutex);
167 if (!uobj->live) { 170 if (!uobj->live) {
168 put_uobj_read(uobj); 171 put_uobj_read(uobj);
169 return NULL; 172 return NULL;
@@ -190,17 +193,18 @@ static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
190 return uobj; 193 return uobj;
191} 194}
192 195
193static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context) 196static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
197 int nested)
194{ 198{
195 struct ib_uobject *uobj; 199 struct ib_uobject *uobj;
196 200
197 uobj = idr_read_uobj(idr, id, context); 201 uobj = idr_read_uobj(idr, id, context, nested);
198 return uobj ? uobj->object : NULL; 202 return uobj ? uobj->object : NULL;
199} 203}
200 204
201static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) 205static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
202{ 206{
203 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context); 207 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
204} 208}
205 209
206static void put_pd_read(struct ib_pd *pd) 210static void put_pd_read(struct ib_pd *pd)
@@ -208,9 +212,9 @@ static void put_pd_read(struct ib_pd *pd)
208 put_uobj_read(pd->uobject); 212 put_uobj_read(pd->uobject);
209} 213}
210 214
211static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context) 215static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
212{ 216{
213 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context); 217 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
214} 218}
215 219
216static void put_cq_read(struct ib_cq *cq) 220static void put_cq_read(struct ib_cq *cq)
@@ -220,7 +224,7 @@ static void put_cq_read(struct ib_cq *cq)
220 224
221static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) 225static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
222{ 226{
223 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context); 227 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
224} 228}
225 229
226static void put_ah_read(struct ib_ah *ah) 230static void put_ah_read(struct ib_ah *ah)
@@ -230,7 +234,7 @@ static void put_ah_read(struct ib_ah *ah)
230 234
231static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) 235static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
232{ 236{
233 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context); 237 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
234} 238}
235 239
236static void put_qp_read(struct ib_qp *qp) 240static void put_qp_read(struct ib_qp *qp)
@@ -240,7 +244,7 @@ static void put_qp_read(struct ib_qp *qp)
240 244
241static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) 245static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
242{ 246{
243 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context); 247 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
244} 248}
245 249
246static void put_srq_read(struct ib_srq *srq) 250static void put_srq_read(struct ib_srq *srq)
@@ -837,7 +841,6 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
837err_copy: 841err_copy:
838 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); 842 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
839 843
840
841err_free: 844err_free:
842 ib_destroy_cq(cq); 845 ib_destroy_cq(cq);
843 846
@@ -867,7 +870,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
867 (unsigned long) cmd.response + sizeof resp, 870 (unsigned long) cmd.response + sizeof resp,
868 in_len - sizeof cmd, out_len - sizeof resp); 871 in_len - sizeof cmd, out_len - sizeof resp);
869 872
870 cq = idr_read_cq(cmd.cq_handle, file->ucontext); 873 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
871 if (!cq) 874 if (!cq)
872 return -EINVAL; 875 return -EINVAL;
873 876
@@ -875,11 +878,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
875 if (ret) 878 if (ret)
876 goto out; 879 goto out;
877 880
878 memset(&resp, 0, sizeof resp);
879 resp.cqe = cq->cqe; 881 resp.cqe = cq->cqe;
880 882
881 if (copy_to_user((void __user *) (unsigned long) cmd.response, 883 if (copy_to_user((void __user *) (unsigned long) cmd.response,
882 &resp, sizeof resp)) 884 &resp, sizeof resp.cqe))
883 ret = -EFAULT; 885 ret = -EFAULT;
884 886
885out: 887out:
@@ -894,7 +896,6 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
894{ 896{
895 struct ib_uverbs_poll_cq cmd; 897 struct ib_uverbs_poll_cq cmd;
896 struct ib_uverbs_poll_cq_resp *resp; 898 struct ib_uverbs_poll_cq_resp *resp;
897 struct ib_uobject *uobj;
898 struct ib_cq *cq; 899 struct ib_cq *cq;
899 struct ib_wc *wc; 900 struct ib_wc *wc;
900 int ret = 0; 901 int ret = 0;
@@ -915,16 +916,15 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
915 goto out_wc; 916 goto out_wc;
916 } 917 }
917 918
918 uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); 919 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
919 if (!uobj) { 920 if (!cq) {
920 ret = -EINVAL; 921 ret = -EINVAL;
921 goto out; 922 goto out;
922 } 923 }
923 cq = uobj->object;
924 924
925 resp->count = ib_poll_cq(cq, cmd.ne, wc); 925 resp->count = ib_poll_cq(cq, cmd.ne, wc);
926 926
927 put_uobj_read(uobj); 927 put_cq_read(cq);
928 928
929 for (i = 0; i < resp->count; i++) { 929 for (i = 0; i < resp->count; i++) {
930 resp->wc[i].wr_id = wc[i].wr_id; 930 resp->wc[i].wr_id = wc[i].wr_id;
@@ -959,21 +959,19 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
959 int out_len) 959 int out_len)
960{ 960{
961 struct ib_uverbs_req_notify_cq cmd; 961 struct ib_uverbs_req_notify_cq cmd;
962 struct ib_uobject *uobj;
963 struct ib_cq *cq; 962 struct ib_cq *cq;
964 963
965 if (copy_from_user(&cmd, buf, sizeof cmd)) 964 if (copy_from_user(&cmd, buf, sizeof cmd))
966 return -EFAULT; 965 return -EFAULT;
967 966
968 uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); 967 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
969 if (!uobj) 968 if (!cq)
970 return -EINVAL; 969 return -EINVAL;
971 cq = uobj->object;
972 970
973 ib_req_notify_cq(cq, cmd.solicited_only ? 971 ib_req_notify_cq(cq, cmd.solicited_only ?
974 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); 972 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
975 973
976 put_uobj_read(uobj); 974 put_cq_read(cq);
977 975
978 return in_len; 976 return in_len;
979} 977}
@@ -1064,9 +1062,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1064 1062
1065 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; 1063 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1066 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 1064 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1067 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); 1065 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
1068 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? 1066 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
1069 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext); 1067 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
1070 1068
1071 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { 1069 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
1072 ret = -EINVAL; 1070 ret = -EINVAL;
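Note: ib_uverbs_create_qp() may read-lock two CQ uobjects of the same lock class (send and receive CQ), so a plain second down_read() would trip lockdep's recursive-locking check; the new nested argument routes that second acquisition through down_read_nested() with SINGLE_DEPTH_NESTING. A compressed sketch of the locking pattern, assuming two distinct uobjects whose mutex field is the rw_semaphore seen above (fragment, error handling elided):

#include <linux/rwsem.h>
#include <linux/lockdep.h>
#include <rdma/ib_verbs.h>

static void lock_two_cq_uobjects(struct ib_uobject *scq_uobj,
				 struct ib_uobject *rcq_uobj)
{
	/* First CQ: ordinary read lock on the uobject's rw_semaphore. */
	down_read(&scq_uobj->mutex);

	/* Second CQ of the same lock class: annotate the nesting so
	 * lockdep does not report a false self-deadlock. */
	if (rcq_uobj != scq_uobj)
		down_read_nested(&rcq_uobj->mutex, SINGLE_DEPTH_NESTING);
}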
@@ -1274,6 +1272,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1274 int out_len) 1272 int out_len)
1275{ 1273{
1276 struct ib_uverbs_modify_qp cmd; 1274 struct ib_uverbs_modify_qp cmd;
1275 struct ib_udata udata;
1277 struct ib_qp *qp; 1276 struct ib_qp *qp;
1278 struct ib_qp_attr *attr; 1277 struct ib_qp_attr *attr;
1279 int ret; 1278 int ret;
@@ -1281,6 +1280,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1281 if (copy_from_user(&cmd, buf, sizeof cmd)) 1280 if (copy_from_user(&cmd, buf, sizeof cmd))
1282 return -EFAULT; 1281 return -EFAULT;
1283 1282
1283 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1284 out_len);
1285
1284 attr = kmalloc(sizeof *attr, GFP_KERNEL); 1286 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1285 if (!attr) 1287 if (!attr)
1286 return -ENOMEM; 1288 return -ENOMEM;
@@ -1337,7 +1339,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1337 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; 1339 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1338 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; 1340 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
1339 1341
1340 ret = ib_modify_qp(qp, attr, cmd.attr_mask); 1342 ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
1341 1343
1342 put_qp_read(qp); 1344 put_qp_read(qp);
1343 1345
@@ -1674,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
1674 break; 1676 break;
1675 } 1677 }
1676 1678
1677
1678 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1679 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1679 &resp, sizeof resp)) 1680 &resp, sizeof resp))
1680 ret = -EFAULT; 1681 ret = -EFAULT;
@@ -1724,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
1724 break; 1725 break;
1725 } 1726 }
1726 1727
1727
1728 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1728 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1729 &resp, sizeof resp)) 1729 &resp, sizeof resp))
1730 ret = -EFAULT; 1730 ret = -EFAULT;
@@ -2055,6 +2055,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2055 int out_len) 2055 int out_len)
2056{ 2056{
2057 struct ib_uverbs_modify_srq cmd; 2057 struct ib_uverbs_modify_srq cmd;
2058 struct ib_udata udata;
2058 struct ib_srq *srq; 2059 struct ib_srq *srq;
2059 struct ib_srq_attr attr; 2060 struct ib_srq_attr attr;
2060 int ret; 2061 int ret;
@@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2062 if (copy_from_user(&cmd, buf, sizeof cmd)) 2063 if (copy_from_user(&cmd, buf, sizeof cmd))
2063 return -EFAULT; 2064 return -EFAULT;
2064 2065
2066 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2067 out_len);
2068
2065 srq = idr_read_srq(cmd.srq_handle, file->ucontext); 2069 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2066 if (!srq) 2070 if (!srq)
2067 return -EINVAL; 2071 return -EINVAL;
@@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2069 attr.max_wr = cmd.max_wr; 2073 attr.max_wr = cmd.max_wr;
2070 attr.srq_limit = cmd.srq_limit; 2074 attr.srq_limit = cmd.srq_limit;
2071 2075
2072 ret = ib_modify_srq(srq, &attr, cmd.attr_mask); 2076 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
2073 2077
2074 put_srq_read(srq); 2078 put_srq_read(srq);
2075 2079
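Note: both ib_uverbs_modify_qp() and ib_uverbs_modify_srq() now wrap whatever bytes follow the fixed command structure in an ib_udata and invoke the provider's modify hook directly, so low-level drivers can consume driver-specific modify parameters from userspace; in-kernel callers keep using ib_modify_qp()/ib_modify_srq(), which, as the verbs.c hunks below show, pass a NULL udata. The two call paths in sketch form (fragment, names as in the patch):

	/* uverbs path: hand the trailing command bytes to the provider. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, out_len);
	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);

	/* kernel path: unchanged entry point, no user data involved. */
	ret = ib_modify_qp(qp, qp_attr, qp_attr_mask);	/* -> modify_qp(..., NULL) */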
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 468999c38803..8b5dd3649bbf 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -79,6 +79,23 @@ enum ib_rate mult_to_ib_rate(int mult)
79} 79}
80EXPORT_SYMBOL(mult_to_ib_rate); 80EXPORT_SYMBOL(mult_to_ib_rate);
81 81
82enum rdma_transport_type
83rdma_node_get_transport(enum rdma_node_type node_type)
84{
85 switch (node_type) {
86 case RDMA_NODE_IB_CA:
87 case RDMA_NODE_IB_SWITCH:
88 case RDMA_NODE_IB_ROUTER:
89 return RDMA_TRANSPORT_IB;
90 case RDMA_NODE_RNIC:
91 return RDMA_TRANSPORT_IWARP;
92 default:
93 BUG();
94 return 0;
95 }
96}
97EXPORT_SYMBOL(rdma_node_get_transport);
98
82/* Protection domains */ 99/* Protection domains */
83 100
84struct ib_pd *ib_alloc_pd(struct ib_device *device) 101struct ib_pd *ib_alloc_pd(struct ib_device *device)
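Note: rdma_node_get_transport() collapses the new RDMA_NODE_* values into a transport type, which is what lets IB-only services (sa_query, ucm and user_mad above) bail out early on iWARP devices such as the AMSO1100 RNIC added below. A minimal sketch of that consumer-side check; example_add_one() is a hypothetical client, not part of the patch:

#include <rdma/ib_verbs.h>

static void example_add_one(struct ib_device *device)
{
	/* IB-specific services do not apply to an iWARP RNIC. */
	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		/* switches are managed through port 0 only */
	} else {
		/* CAs and routers expose ports 1..phys_port_cnt,
		 * cf. ib_sa_add_one()/ib_umad_add_one() above */
	}
}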
@@ -231,7 +248,7 @@ int ib_modify_srq(struct ib_srq *srq,
231 struct ib_srq_attr *srq_attr, 248 struct ib_srq_attr *srq_attr,
232 enum ib_srq_attr_mask srq_attr_mask) 249 enum ib_srq_attr_mask srq_attr_mask)
233{ 250{
234 return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); 251 return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
235} 252}
236EXPORT_SYMBOL(ib_modify_srq); 253EXPORT_SYMBOL(ib_modify_srq);
237 254
@@ -547,7 +564,7 @@ int ib_modify_qp(struct ib_qp *qp,
547 struct ib_qp_attr *qp_attr, 564 struct ib_qp_attr *qp_attr,
548 int qp_attr_mask) 565 int qp_attr_mask)
549{ 566{
550 return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); 567 return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
551} 568}
552EXPORT_SYMBOL(ib_modify_qp); 569EXPORT_SYMBOL(ib_modify_qp);
553 570
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild
new file mode 100644
index 000000000000..06964c4af849
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/Kbuild
@@ -0,0 +1,8 @@
1ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
2EXTRA_CFLAGS += -DDEBUG
3endif
4
5obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
6
7iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
8 c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/infiniband/hw/amso1100/Kconfig b/drivers/infiniband/hw/amso1100/Kconfig
new file mode 100644
index 000000000000..809cb14ac6de
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/Kconfig
@@ -0,0 +1,15 @@
1config INFINIBAND_AMSO1100
2 tristate "Ammasso 1100 HCA support"
3 depends on PCI && INET && INFINIBAND
4 ---help---
5 This is a low-level driver for the Ammasso 1100 host
6 channel adapter (HCA).
7
8config INFINIBAND_AMSO1100_DEBUG
9 bool "Verbose debugging output"
10 depends on INFINIBAND_AMSO1100
11 default n
12 ---help---
13 This option causes the amso1100 driver to produce a bunch of
14 debug messages. Select this if you are developing the driver
15 or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
new file mode 100644
index 000000000000..9e9120f36019
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -0,0 +1,1255 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/module.h>
34#include <linux/moduleparam.h>
35#include <linux/pci.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/inetdevice.h>
39#include <linux/delay.h>
40#include <linux/ethtool.h>
41#include <linux/mii.h>
42#include <linux/if_vlan.h>
43#include <linux/crc32.h>
44#include <linux/in.h>
45#include <linux/ip.h>
46#include <linux/tcp.h>
47#include <linux/init.h>
48#include <linux/dma-mapping.h>
49
50#include <asm/io.h>
51#include <asm/irq.h>
52#include <asm/byteorder.h>
53
54#include <rdma/ib_smi.h>
55#include "c2.h"
56#include "c2_provider.h"
57
58MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
59MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
60MODULE_LICENSE("Dual BSD/GPL");
61MODULE_VERSION(DRV_VERSION);
62
63static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
64 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
65
66static int debug = -1; /* defaults above */
67module_param(debug, int, 0);
68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70static int c2_up(struct net_device *netdev);
71static int c2_down(struct net_device *netdev);
72static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
73static void c2_tx_interrupt(struct net_device *netdev);
74static void c2_rx_interrupt(struct net_device *netdev);
75static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs);
76static void c2_tx_timeout(struct net_device *netdev);
77static int c2_change_mtu(struct net_device *netdev, int new_mtu);
78static void c2_reset(struct c2_port *c2_port);
79static struct net_device_stats *c2_get_stats(struct net_device *netdev);
80
81static struct pci_device_id c2_pci_table[] = {
82 { PCI_DEVICE(0x18b8, 0xb001) },
83 { 0 }
84};
85
86MODULE_DEVICE_TABLE(pci, c2_pci_table);
87
88static void c2_print_macaddr(struct net_device *netdev)
89{
90 pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
91 "IRQ %u\n", netdev->name,
92 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
93 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
94 netdev->irq);
95}
96
97static void c2_set_rxbufsize(struct c2_port *c2_port)
98{
99 struct net_device *netdev = c2_port->netdev;
100
101 if (netdev->mtu > RX_BUF_SIZE)
102 c2_port->rx_buf_size =
103 netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
104 NET_IP_ALIGN;
105 else
106 c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
107}
108
109/*
110 * Allocate TX ring elements and chain them together.
111 * One-to-one association of adapter descriptors with ring elements.
112 */
113static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
114 dma_addr_t base, void __iomem * mmio_txp_ring)
115{
116 struct c2_tx_desc *tx_desc;
117 struct c2_txp_desc __iomem *txp_desc;
118 struct c2_element *elem;
119 int i;
120
121 tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
122 if (!tx_ring->start)
123 return -ENOMEM;
124
125 elem = tx_ring->start;
126 tx_desc = vaddr;
127 txp_desc = mmio_txp_ring;
128 for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
129 tx_desc->len = 0;
130 tx_desc->status = 0;
131
132 /* Set TXP_HTXD_UNINIT */
133 __raw_writeq(cpu_to_be64(0x1122334455667788ULL),
134 (void __iomem *) txp_desc + C2_TXP_ADDR);
135 __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
136 __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
137 (void __iomem *) txp_desc + C2_TXP_FLAGS);
138
139 elem->skb = NULL;
140 elem->ht_desc = tx_desc;
141 elem->hw_desc = txp_desc;
142
143 if (i == tx_ring->count - 1) {
144 elem->next = tx_ring->start;
145 tx_desc->next_offset = base;
146 } else {
147 elem->next = elem + 1;
148 tx_desc->next_offset =
149 base + (i + 1) * sizeof(*tx_desc);
150 }
151 }
152
153 tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
154
155 return 0;
156}
157
158/*
159 * Allocate RX ring elements and chain them together.
160 * One-to-one association of adapter descriptors with ring elements.
161 */
162static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
163 dma_addr_t base, void __iomem * mmio_rxp_ring)
164{
165 struct c2_rx_desc *rx_desc;
166 struct c2_rxp_desc __iomem *rxp_desc;
167 struct c2_element *elem;
168 int i;
169
170 rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
171 if (!rx_ring->start)
172 return -ENOMEM;
173
174 elem = rx_ring->start;
175 rx_desc = vaddr;
176 rxp_desc = mmio_rxp_ring;
177 for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
178 rx_desc->len = 0;
179 rx_desc->status = 0;
180
181 /* Set RXP_HRXD_UNINIT */
182 __raw_writew(cpu_to_be16(RXP_HRXD_OK),
183 (void __iomem *) rxp_desc + C2_RXP_STATUS);
184 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
185 __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
186 __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
187 (void __iomem *) rxp_desc + C2_RXP_ADDR);
188 __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
189 (void __iomem *) rxp_desc + C2_RXP_FLAGS);
190
191 elem->skb = NULL;
192 elem->ht_desc = rx_desc;
193 elem->hw_desc = rxp_desc;
194
195 if (i == rx_ring->count - 1) {
196 elem->next = rx_ring->start;
197 rx_desc->next_offset = base;
198 } else {
199 elem->next = elem + 1;
200 rx_desc->next_offset =
201 base + (i + 1) * sizeof(*rx_desc);
202 }
203 }
204
205 rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
206
207 return 0;
208}
209
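Note: c2_tx_ring_alloc() and c2_rx_ring_alloc() both build the host-side shadow ring as a circular singly linked list of c2_element entries, one per adapter descriptor, with the last element pointing back at the start; every later traversal in the driver therefore uses a do/while loop that stops when it returns to ring->start. The wiring and traversal idiom in isolation (a sketch with a simplified element type, not driver code):

#include <stddef.h>

struct ring_elem {
	struct ring_elem *next;
	/* per-descriptor state lives here */
};

/* Chain count elements into a circle, as both ring allocators above do. */
static void ring_link(struct ring_elem *elem, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		elem[i].next = (i == count - 1) ? &elem[0] : &elem[i + 1];
}

/* Visit every element exactly once, starting anywhere on the ring. */
static void ring_walk(struct ring_elem *start)
{
	struct ring_elem *elem = start;

	do {
		/* ... per-descriptor work ... */
	} while ((elem = elem->next) != start);
}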
210/* Setup buffer for receiving */
211static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
212{
213 struct c2_dev *c2dev = c2_port->c2dev;
214 struct c2_rx_desc *rx_desc = elem->ht_desc;
215 struct sk_buff *skb;
216 dma_addr_t mapaddr;
217 u32 maplen;
218 struct c2_rxp_hdr *rxp_hdr;
219
220 skb = dev_alloc_skb(c2_port->rx_buf_size);
221 if (unlikely(!skb)) {
222 pr_debug("%s: out of memory for receive\n",
223 c2_port->netdev->name);
224 return -ENOMEM;
225 }
226
227 /* Zero out the rxp hdr in the sk_buff */
228 memset(skb->data, 0, sizeof(*rxp_hdr));
229
230 skb->dev = c2_port->netdev;
231
232 maplen = c2_port->rx_buf_size;
233 mapaddr =
234 pci_map_single(c2dev->pcidev, skb->data, maplen,
235 PCI_DMA_FROMDEVICE);
236
237 /* Set the sk_buff RXP_header to RXP_HRXD_READY */
238 rxp_hdr = (struct c2_rxp_hdr *) skb->data;
239 rxp_hdr->flags = RXP_HRXD_READY;
240
241 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
242 __raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
243 elem->hw_desc + C2_RXP_LEN);
244 __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
245 __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
246
247 elem->skb = skb;
248 elem->mapaddr = mapaddr;
249 elem->maplen = maplen;
250 rx_desc->len = maplen;
251
252 return 0;
253}
254
255/*
256 * Allocate buffers for the Rx ring
257 * For receive: rx_ring.to_clean is next received frame
258 */
259static int c2_rx_fill(struct c2_port *c2_port)
260{
261 struct c2_ring *rx_ring = &c2_port->rx_ring;
262 struct c2_element *elem;
263 int ret = 0;
264
265 elem = rx_ring->start;
266 do {
267 if (c2_rx_alloc(c2_port, elem)) {
268 ret = 1;
269 break;
270 }
271 } while ((elem = elem->next) != rx_ring->start);
272
273 rx_ring->to_clean = rx_ring->start;
274 return ret;
275}
276
277/* Free all buffers in RX ring, assumes receiver stopped */
278static void c2_rx_clean(struct c2_port *c2_port)
279{
280 struct c2_dev *c2dev = c2_port->c2dev;
281 struct c2_ring *rx_ring = &c2_port->rx_ring;
282 struct c2_element *elem;
283 struct c2_rx_desc *rx_desc;
284
285 elem = rx_ring->start;
286 do {
287 rx_desc = elem->ht_desc;
288 rx_desc->len = 0;
289
290 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
291 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
292 __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
293 __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
294 elem->hw_desc + C2_RXP_ADDR);
295 __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
296 elem->hw_desc + C2_RXP_FLAGS);
297
298 if (elem->skb) {
299 pci_unmap_single(c2dev->pcidev, elem->mapaddr,
300 elem->maplen, PCI_DMA_FROMDEVICE);
301 dev_kfree_skb(elem->skb);
302 elem->skb = NULL;
303 }
304 } while ((elem = elem->next) != rx_ring->start);
305}
306
307static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
308{
309 struct c2_tx_desc *tx_desc = elem->ht_desc;
310
311 tx_desc->len = 0;
312
313 pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
314 PCI_DMA_TODEVICE);
315
316 if (elem->skb) {
317 dev_kfree_skb_any(elem->skb);
318 elem->skb = NULL;
319 }
320
321 return 0;
322}
323
324/* Free all buffers in TX ring, assumes transmitter stopped */
325static void c2_tx_clean(struct c2_port *c2_port)
326{
327 struct c2_ring *tx_ring = &c2_port->tx_ring;
328 struct c2_element *elem;
329 struct c2_txp_desc txp_htxd;
330 int retry;
331 unsigned long flags;
332
333 spin_lock_irqsave(&c2_port->tx_lock, flags);
334
335 elem = tx_ring->start;
336
337 do {
338 retry = 0;
339 do {
340 txp_htxd.flags =
341 readw(elem->hw_desc + C2_TXP_FLAGS);
342
343 if (txp_htxd.flags == TXP_HTXD_READY) {
344 retry = 1;
345 __raw_writew(0,
346 elem->hw_desc + C2_TXP_LEN);
347 __raw_writeq(0,
348 elem->hw_desc + C2_TXP_ADDR);
349 __raw_writew(cpu_to_be16(TXP_HTXD_DONE),
350 elem->hw_desc + C2_TXP_FLAGS);
351 c2_port->netstats.tx_dropped++;
352 break;
353 } else {
354 __raw_writew(0,
355 elem->hw_desc + C2_TXP_LEN);
356 __raw_writeq(cpu_to_be64(0x1122334455667788ULL),
357 elem->hw_desc + C2_TXP_ADDR);
358 __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
359 elem->hw_desc + C2_TXP_FLAGS);
360 }
361
362 c2_tx_free(c2_port->c2dev, elem);
363
364 } while ((elem = elem->next) != tx_ring->start);
365 } while (retry);
366
367 c2_port->tx_avail = c2_port->tx_ring.count - 1;
368 c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
369
370 if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
371 netif_wake_queue(c2_port->netdev);
372
373 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
374}
375
376/*
377 * Process transmit descriptors marked 'DONE' by the firmware,
378 * freeing up their unneeded sk_buffs.
379 */
380static void c2_tx_interrupt(struct net_device *netdev)
381{
382 struct c2_port *c2_port = netdev_priv(netdev);
383 struct c2_dev *c2dev = c2_port->c2dev;
384 struct c2_ring *tx_ring = &c2_port->tx_ring;
385 struct c2_element *elem;
386 struct c2_txp_desc txp_htxd;
387
388 spin_lock(&c2_port->tx_lock);
389
390 for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
391 elem = elem->next) {
392 txp_htxd.flags =
393 be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
394
395 if (txp_htxd.flags != TXP_HTXD_DONE)
396 break;
397
398 if (netif_msg_tx_done(c2_port)) {
399 /* PCI reads are expensive in fast path */
400 txp_htxd.len =
401 be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
402 pr_debug("%s: tx done slot %3Zu status 0x%x len "
403 "%5u bytes\n",
404 netdev->name, elem - tx_ring->start,
405 txp_htxd.flags, txp_htxd.len);
406 }
407
408 c2_tx_free(c2dev, elem);
409 ++(c2_port->tx_avail);
410 }
411
412 tx_ring->to_clean = elem;
413
414 if (netif_queue_stopped(netdev)
415 && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
416 netif_wake_queue(netdev);
417
418 spin_unlock(&c2_port->tx_lock);
419}
420
421static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
422{
423 struct c2_rx_desc *rx_desc = elem->ht_desc;
424 struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
425
426 if (rxp_hdr->status != RXP_HRXD_OK ||
427 rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
428 pr_debug("BAD RXP_HRXD\n");
429 pr_debug(" rx_desc : %p\n", rx_desc);
430 pr_debug(" index : %Zu\n",
431 elem - c2_port->rx_ring.start);
432 pr_debug(" len : %u\n", rx_desc->len);
433 pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr,
434 (void *) __pa((unsigned long) rxp_hdr));
435 pr_debug(" flags : 0x%x\n", rxp_hdr->flags);
436 pr_debug(" status: 0x%x\n", rxp_hdr->status);
437 pr_debug(" len : %u\n", rxp_hdr->len);
438 pr_debug(" rsvd : 0x%x\n", rxp_hdr->rsvd);
439 }
440
441 /* Setup the skb for reuse since we're dropping this pkt */
442 elem->skb->tail = elem->skb->data = elem->skb->head;
443
444 /* Zero out the rxp hdr in the sk_buff */
445 memset(elem->skb->data, 0, sizeof(*rxp_hdr));
446
447 /* Write the descriptor to the adapter's rx ring */
448 __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
449 __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
450 __raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
451 elem->hw_desc + C2_RXP_LEN);
452 __raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
453 __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
454
455 pr_debug("packet dropped\n");
456 c2_port->netstats.rx_dropped++;
457}
458
459static void c2_rx_interrupt(struct net_device *netdev)
460{
461 struct c2_port *c2_port = netdev_priv(netdev);
462 struct c2_dev *c2dev = c2_port->c2dev;
463 struct c2_ring *rx_ring = &c2_port->rx_ring;
464 struct c2_element *elem;
465 struct c2_rx_desc *rx_desc;
466 struct c2_rxp_hdr *rxp_hdr;
467 struct sk_buff *skb;
468 dma_addr_t mapaddr;
469 u32 maplen, buflen;
470 unsigned long flags;
471
472 spin_lock_irqsave(&c2dev->lock, flags);
473
474 /* Begin where we left off */
475 rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
476
477 for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
478 elem = elem->next) {
479 rx_desc = elem->ht_desc;
480 mapaddr = elem->mapaddr;
481 maplen = elem->maplen;
482 skb = elem->skb;
483 rxp_hdr = (struct c2_rxp_hdr *) skb->data;
484
485 if (rxp_hdr->flags != RXP_HRXD_DONE)
486 break;
487 buflen = rxp_hdr->len;
488
489 /* Sanity check the RXP header */
490 if (rxp_hdr->status != RXP_HRXD_OK ||
491 buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
492 c2_rx_error(c2_port, elem);
493 continue;
494 }
495
496 /*
497 * Allocate and map a new skb for replenishing the host
498 * RX desc
499 */
500 if (c2_rx_alloc(c2_port, elem)) {
501 c2_rx_error(c2_port, elem);
502 continue;
503 }
504
505 /* Unmap the old skb */
506 pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
507 PCI_DMA_FROMDEVICE);
508
509 prefetch(skb->data);
510
511 /*
512 * Skip past the leading 8 bytes comprising of the
513 * "struct c2_rxp_hdr", prepended by the adapter
514 * to the usual Ethernet header ("struct ethhdr"),
515 * to the start of the raw Ethernet packet.
516 *
517 * Fix up the various fields in the sk_buff before
518 * passing it up to netif_rx(). The transfer size
519 * (in bytes) specified by the adapter len field of
520 * the "struct rxp_hdr_t" does NOT include the
521 * "sizeof(struct c2_rxp_hdr)".
522 */
523 skb->data += sizeof(*rxp_hdr);
524 skb->tail = skb->data + buflen;
525 skb->len = buflen;
526 skb->dev = netdev;
527 skb->protocol = eth_type_trans(skb, netdev);
528
529 netif_rx(skb);
530
531 netdev->last_rx = jiffies;
532 c2_port->netstats.rx_packets++;
533 c2_port->netstats.rx_bytes += buflen;
534 }
535
536 /* Save where we left off */
537 rx_ring->to_clean = elem;
538 c2dev->cur_rx = elem - rx_ring->start;
539 C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
540
541 spin_unlock_irqrestore(&c2dev->lock, flags);
542}
543
544/*
545 * Handle netisr0 TX & RX interrupts.
546 */
547static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs)
548{
549 unsigned int netisr0, dmaisr;
550 int handled = 0;
551 struct c2_dev *c2dev = (struct c2_dev *) dev_id;
552
553 /* Process CCILNET interrupts */
554 netisr0 = readl(c2dev->regs + C2_NISR0);
555 if (netisr0) {
556
557 /*
558 * There is an issue with the firmware that always
559 * provides the status of RX for both TX & RX
560 * interrupts. So process both queues here.
561 */
562 c2_rx_interrupt(c2dev->netdev);
563 c2_tx_interrupt(c2dev->netdev);
564
565 /* Clear the interrupt */
566 writel(netisr0, c2dev->regs + C2_NISR0);
567 handled++;
568 }
569
570 /* Process RNIC interrupts */
571 dmaisr = readl(c2dev->regs + C2_DISR);
572 if (dmaisr) {
573 writel(dmaisr, c2dev->regs + C2_DISR);
574 c2_rnic_interrupt(c2dev);
575 handled++;
576 }
577
578 if (handled) {
579 return IRQ_HANDLED;
580 } else {
581 return IRQ_NONE;
582 }
583}
584
585static int c2_up(struct net_device *netdev)
586{
587 struct c2_port *c2_port = netdev_priv(netdev);
588 struct c2_dev *c2dev = c2_port->c2dev;
589 struct c2_element *elem;
590 struct c2_rxp_hdr *rxp_hdr;
591 struct in_device *in_dev;
592 size_t rx_size, tx_size;
593 int ret, i;
594 unsigned int netimr0;
595
596 if (netif_msg_ifup(c2_port))
597 pr_debug("%s: enabling interface\n", netdev->name);
598
599 /* Set the Rx buffer size based on MTU */
600 c2_set_rxbufsize(c2_port);
601
602 /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
603 rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
604 tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
605
606 c2_port->mem_size = tx_size + rx_size;
607 c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
608 &c2_port->dma);
609 if (c2_port->mem == NULL) {
610 pr_debug("Unable to allocate memory for "
611 "host descriptor rings\n");
612 return -ENOMEM;
613 }
614
615 memset(c2_port->mem, 0, c2_port->mem_size);
616
617 /* Create the Rx host descriptor ring */
618 if ((ret =
619 c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
620 c2dev->mmio_rxp_ring))) {
621 pr_debug("Unable to create RX ring\n");
622 goto bail0;
623 }
624
625 /* Allocate Rx buffers for the host descriptor ring */
626 if (c2_rx_fill(c2_port)) {
627 pr_debug("Unable to fill RX ring\n");
628 goto bail1;
629 }
630
631 /* Create the Tx host descriptor ring */
632 if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
633 c2_port->dma + rx_size,
634 c2dev->mmio_txp_ring))) {
635 pr_debug("Unable to create TX ring\n");
636 goto bail1;
637 }
638
639 /* Set the TX pointer to where we left off */
640 c2_port->tx_avail = c2_port->tx_ring.count - 1;
641 c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
642 c2_port->tx_ring.start + c2dev->cur_tx;
643
644 /* missing: Initialize MAC */
645
646 BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
647
648 /* Reset the adapter, ensures the driver is in sync with the RXP */
649 c2_reset(c2_port);
650
651 /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
652 for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
653 i++, elem++) {
654 rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
655 rxp_hdr->flags = 0;
656 __raw_writew(cpu_to_be16(RXP_HRXD_READY),
657 elem->hw_desc + C2_RXP_FLAGS);
658 }
659
660 /* Enable network packets */
661 netif_start_queue(netdev);
662
663 /* Enable IRQ */
664 writel(0, c2dev->regs + C2_IDIS);
665 netimr0 = readl(c2dev->regs + C2_NIMR0);
666 netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
667 writel(netimr0, c2dev->regs + C2_NIMR0);
668
669 /* Tell the stack to ignore arp requests for ipaddrs bound to
670 * other interfaces. This is needed to prevent the host stack
671 * from responding to arp requests to the ipaddr bound on the
672 * rdma interface.
673 */
674 in_dev = in_dev_get(netdev);
675 in_dev->cnf.arp_ignore = 1;
676 in_dev_put(in_dev);
677
678 return 0;
679
680 bail1:
681 c2_rx_clean(c2_port);
682 kfree(c2_port->rx_ring.start);
683
684 bail0:
685 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
686 c2_port->dma);
687
688 return ret;
689}
690
691static int c2_down(struct net_device *netdev)
692{
693 struct c2_port *c2_port = netdev_priv(netdev);
694 struct c2_dev *c2dev = c2_port->c2dev;
695
696 if (netif_msg_ifdown(c2_port))
697 pr_debug("%s: disabling interface\n",
698 netdev->name);
699
700 /* Wait for all the queued packets to get sent */
701 c2_tx_interrupt(netdev);
702
703 /* Disable network packets */
704 netif_stop_queue(netdev);
705
706 /* Disable IRQs by clearing the interrupt mask */
707 writel(1, c2dev->regs + C2_IDIS);
708 writel(0, c2dev->regs + C2_NIMR0);
709
710 /* missing: Stop transmitter */
711
712 /* missing: Stop receiver */
713
714 /* Reset the adapter, ensures the driver is in sync with the RXP */
715 c2_reset(c2_port);
716
717 /* missing: Turn off LEDs here */
718
719 /* Free all buffers in the host descriptor rings */
720 c2_tx_clean(c2_port);
721 c2_rx_clean(c2_port);
722
723 /* Free the host descriptor rings */
724 kfree(c2_port->rx_ring.start);
725 kfree(c2_port->tx_ring.start);
726 pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
727 c2_port->dma);
728
729 return 0;
730}
731
732static void c2_reset(struct c2_port *c2_port)
733{
734 struct c2_dev *c2dev = c2_port->c2dev;
735 unsigned int cur_rx = c2dev->cur_rx;
736
737 /* Tell the hardware to quiesce */
738 C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
739
740 /*
741 * The hardware will reset the C2_PCI_HRX_QUI bit once
742 * the RXP is quiesced. Wait 2 seconds for this.
743 */
744 ssleep(2);
745
746 cur_rx = C2_GET_CUR_RX(c2dev);
747
748 if (cur_rx & C2_PCI_HRX_QUI)
749 pr_debug("c2_reset: failed to quiesce the hardware!\n");
750
751 cur_rx &= ~C2_PCI_HRX_QUI;
752
753 c2dev->cur_rx = cur_rx;
754
755 pr_debug("Current RX: %u\n", c2dev->cur_rx);
756}
757
758static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
759{
760 struct c2_port *c2_port = netdev_priv(netdev);
761 struct c2_dev *c2dev = c2_port->c2dev;
762 struct c2_ring *tx_ring = &c2_port->tx_ring;
763 struct c2_element *elem;
764 dma_addr_t mapaddr;
765 u32 maplen;
766 unsigned long flags;
767 unsigned int i;
768
769 spin_lock_irqsave(&c2_port->tx_lock, flags);
770
771 if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
772 netif_stop_queue(netdev);
773 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
774
775 pr_debug("%s: Tx ring full when queue awake!\n",
776 netdev->name);
777 return NETDEV_TX_BUSY;
778 }
779
780 maplen = skb_headlen(skb);
781 mapaddr =
782 pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
783
784 elem = tx_ring->to_use;
785 elem->skb = skb;
786 elem->mapaddr = mapaddr;
787 elem->maplen = maplen;
788
789 /* Tell HW to xmit */
790 __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
791 __raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
792 __raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
793
794 c2_port->netstats.tx_packets++;
795 c2_port->netstats.tx_bytes += maplen;
796
797 /* Loop thru additional data fragments and queue them */
798 if (skb_shinfo(skb)->nr_frags) {
799 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
800 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
801 maplen = frag->size;
802 mapaddr =
803 pci_map_page(c2dev->pcidev, frag->page,
804 frag->page_offset, maplen,
805 PCI_DMA_TODEVICE);
806
807 elem = elem->next;
808 elem->skb = NULL;
809 elem->mapaddr = mapaddr;
810 elem->maplen = maplen;
811
812 /* Tell HW to xmit */
813 __raw_writeq(cpu_to_be64(mapaddr),
814 elem->hw_desc + C2_TXP_ADDR);
815 __raw_writew(cpu_to_be16(maplen),
816 elem->hw_desc + C2_TXP_LEN);
817 __raw_writew(cpu_to_be16(TXP_HTXD_READY),
818 elem->hw_desc + C2_TXP_FLAGS);
819
820 c2_port->netstats.tx_packets++;
821 c2_port->netstats.tx_bytes += maplen;
822 }
823 }
824
825 tx_ring->to_use = elem->next;
826 c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
827
828 if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
829 netif_stop_queue(netdev);
830 if (netif_msg_tx_queued(c2_port))
831 pr_debug("%s: transmit queue full\n",
832 netdev->name);
833 }
834
835 spin_unlock_irqrestore(&c2_port->tx_lock, flags);
836
837 netdev->trans_start = jiffies;
838
839 return NETDEV_TX_OK;
840}
841
842static struct net_device_stats *c2_get_stats(struct net_device *netdev)
843{
844 struct c2_port *c2_port = netdev_priv(netdev);
845
846 return &c2_port->netstats;
847}
848
849static void c2_tx_timeout(struct net_device *netdev)
850{
851 struct c2_port *c2_port = netdev_priv(netdev);
852
853 if (netif_msg_timer(c2_port))
854 pr_debug("%s: tx timeout\n", netdev->name);
855
856 c2_tx_clean(c2_port);
857}
858
859static int c2_change_mtu(struct net_device *netdev, int new_mtu)
860{
861 int ret = 0;
862
863 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
864 return -EINVAL;
865
866 netdev->mtu = new_mtu;
867
868 if (netif_running(netdev)) {
869 c2_down(netdev);
870
871 c2_up(netdev);
872 }
873
874 return ret;
875}
876
877/* Initialize network device */
878static struct net_device *c2_devinit(struct c2_dev *c2dev,
879 void __iomem * mmio_addr)
880{
881 struct c2_port *c2_port = NULL;
882 struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
883
884 if (!netdev) {
885 pr_debug("c2_port etherdev alloc failed");
886 return NULL;
887 }
888
889 SET_MODULE_OWNER(netdev);
890 SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
891
892 netdev->open = c2_up;
893 netdev->stop = c2_down;
894 netdev->hard_start_xmit = c2_xmit_frame;
895 netdev->get_stats = c2_get_stats;
896 netdev->tx_timeout = c2_tx_timeout;
897 netdev->change_mtu = c2_change_mtu;
898 netdev->watchdog_timeo = C2_TX_TIMEOUT;
899 netdev->irq = c2dev->pcidev->irq;
900
901 c2_port = netdev_priv(netdev);
902 c2_port->netdev = netdev;
903 c2_port->c2dev = c2dev;
904 c2_port->msg_enable = netif_msg_init(debug, default_msg);
905 c2_port->tx_ring.count = C2_NUM_TX_DESC;
906 c2_port->rx_ring.count = C2_NUM_RX_DESC;
907
908 spin_lock_init(&c2_port->tx_lock);
909
910 /* Copy our 48-bit ethernet hardware address */
911 memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
912
913 /* Validate the MAC address */
914 if (!is_valid_ether_addr(netdev->dev_addr)) {
915 pr_debug("Invalid MAC Address\n");
916 c2_print_macaddr(netdev);
917 free_netdev(netdev);
918 return NULL;
919 }
920
921 c2dev->netdev = netdev;
922
923 return netdev;
924}
925
926static int __devinit c2_probe(struct pci_dev *pcidev,
927 const struct pci_device_id *ent)
928{
929 int ret = 0, i;
930 unsigned long reg0_start, reg0_flags, reg0_len;
931 unsigned long reg2_start, reg2_flags, reg2_len;
932 unsigned long reg4_start, reg4_flags, reg4_len;
933 unsigned kva_map_size;
934 struct net_device *netdev = NULL;
935 struct c2_dev *c2dev = NULL;
936 void __iomem *mmio_regs = NULL;
937
938 printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
939 DRV_VERSION);
940
941 /* Enable PCI device */
942 ret = pci_enable_device(pcidev);
943 if (ret) {
944 printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
945 pci_name(pcidev));
946 goto bail0;
947 }
948
949 reg0_start = pci_resource_start(pcidev, BAR_0);
950 reg0_len = pci_resource_len(pcidev, BAR_0);
951 reg0_flags = pci_resource_flags(pcidev, BAR_0);
952
953 reg2_start = pci_resource_start(pcidev, BAR_2);
954 reg2_len = pci_resource_len(pcidev, BAR_2);
955 reg2_flags = pci_resource_flags(pcidev, BAR_2);
956
957 reg4_start = pci_resource_start(pcidev, BAR_4);
958 reg4_len = pci_resource_len(pcidev, BAR_4);
959 reg4_flags = pci_resource_flags(pcidev, BAR_4);
960
961 pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
962 pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
963 pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
964
965 /* Make sure PCI base addr are MMIO */
966 if (!(reg0_flags & IORESOURCE_MEM) ||
967 !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
968 printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
969 ret = -ENODEV;
970 goto bail1;
971 }
972
973 /* Check for weird/broken PCI region reporting */
974 if ((reg0_len < C2_REG0_SIZE) ||
975 (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
976 printk(KERN_ERR PFX "Invalid PCI region sizes\n");
977 ret = -ENODEV;
978 goto bail1;
979 }
980
981 /* Reserve PCI I/O and memory resources */
982 ret = pci_request_regions(pcidev, DRV_NAME);
983 if (ret) {
984 printk(KERN_ERR PFX "%s: Unable to request regions\n",
985 pci_name(pcidev));
986 goto bail1;
987 }
988
989 if ((sizeof(dma_addr_t) > 4)) {
990 ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
991 if (ret < 0) {
992 printk(KERN_ERR PFX "64b DMA configuration failed\n");
993 goto bail2;
994 }
995 } else {
996 ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
997 if (ret < 0) {
998 printk(KERN_ERR PFX "32b DMA configuration failed\n");
999 goto bail2;
1000 }
1001 }
1002
1003 /* Enables bus-mastering on the device */
1004 pci_set_master(pcidev);
1005
1006 /* Remap the adapter PCI registers in BAR4 */
1007 mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
1008 sizeof(struct c2_adapter_pci_regs));
1009 if (mmio_regs == 0UL) {
1010 printk(KERN_ERR PFX
1011 "Unable to remap adapter PCI registers in BAR4\n");
1012 ret = -EIO;
1013 goto bail2;
1014 }
1015
1016 /* Validate PCI regs magic */
1017 for (i = 0; i < sizeof(c2_magic); i++) {
1018 if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
1019 printk(KERN_ERR PFX "Downlevel Firmware boot loader "
1020 "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
1021 "utility to update your boot loader\n",
1022 i + 1, sizeof(c2_magic),
1023 readb(mmio_regs + C2_REGS_MAGIC + i),
1024 c2_magic[i]);
1025 printk(KERN_ERR PFX "Adapter not claimed\n");
1026 iounmap(mmio_regs);
1027 ret = -EIO;
1028 goto bail2;
1029 }
1030 }
1031
1032 /* Validate the adapter version */
1033 if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
1034 printk(KERN_ERR PFX "Version mismatch "
1035 "[fw=%u, c2=%u], Adapter not claimed\n",
1036 be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
1037 C2_VERSION);
1038 ret = -EINVAL;
1039 iounmap(mmio_regs);
1040 goto bail2;
1041 }
1042
1043 /* Validate the adapter IVN */
1044 if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
1045 		printk(KERN_ERR PFX "Downlevel Firmware level. You should be using "
1046 "the OpenIB device support kit. "
1047 "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
1048 be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
1049 C2_IVN);
1050 ret = -EINVAL;
1051 iounmap(mmio_regs);
1052 goto bail2;
1053 }
1054
1055 /* Allocate hardware structure */
1056 c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
1057 if (!c2dev) {
1058 printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
1059 pci_name(pcidev));
1060 ret = -ENOMEM;
1061 iounmap(mmio_regs);
1062 goto bail2;
1063 }
1064
1065 memset(c2dev, 0, sizeof(*c2dev));
1066 spin_lock_init(&c2dev->lock);
1067 c2dev->pcidev = pcidev;
1068 c2dev->cur_tx = 0;
1069
1070 /* Get the last RX index */
1071 c2dev->cur_rx =
1072 (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
1073 0xffffc000) / sizeof(struct c2_rxp_desc);
1074
1075 /* Request an interrupt line for the driver */
1076 ret = request_irq(pcidev->irq, c2_interrupt, SA_SHIRQ, DRV_NAME, c2dev);
1077 if (ret) {
1078 printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
1079 pci_name(pcidev), pcidev->irq);
1080 iounmap(mmio_regs);
1081 goto bail3;
1082 }
1083
1084 /* Set driver specific data */
1085 pci_set_drvdata(pcidev, c2dev);
1086
1087 /* Initialize network device */
1088 if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
1089 iounmap(mmio_regs);
1090 goto bail4;
1091 }
1092
1093 /* Save off the actual size prior to unmapping mmio_regs */
1094 kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
1095
1096 /* Unmap the adapter PCI registers in BAR4 */
1097 iounmap(mmio_regs);
1098
1099 /* Register network device */
1100 ret = register_netdev(netdev);
1101 if (ret) {
1102 printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
1103 ret);
1104 goto bail5;
1105 }
1106
1107 /* Disable network packets */
1108 netif_stop_queue(netdev);
1109
1110 /* Remap the adapter HRXDQ PA space to kernel VA space */
1111 c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
1112 C2_RXP_HRXDQ_SIZE);
1113 if (c2dev->mmio_rxp_ring == 0UL) {
1114 printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
1115 ret = -EIO;
1116 goto bail6;
1117 }
1118
1119 /* Remap the adapter HTXDQ PA space to kernel VA space */
1120 c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
1121 C2_TXP_HTXDQ_SIZE);
1122 if (c2dev->mmio_txp_ring == 0UL) {
1123 printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
1124 ret = -EIO;
1125 goto bail7;
1126 }
1127
1128 /* Save off the current RX index in the last 4 bytes of the TXP Ring */
1129 C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
1130
1131 /* Remap the PCI registers in adapter BAR0 to kernel VA space */
1132 c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
1133 if (c2dev->regs == 0UL) {
1134 printk(KERN_ERR PFX "Unable to remap BAR0\n");
1135 ret = -EIO;
1136 goto bail8;
1137 }
1138
1139 /* Remap the PCI registers in adapter BAR4 to kernel VA space */
1140 c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
1141 c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
1142 kva_map_size);
1143 if (c2dev->kva == 0UL) {
1144 printk(KERN_ERR PFX "Unable to remap BAR4\n");
1145 ret = -EIO;
1146 goto bail9;
1147 }
1148
1149 /* Print out the MAC address */
1150 c2_print_macaddr(netdev);
1151
1152 ret = c2_rnic_init(c2dev);
1153 if (ret) {
1154 printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
1155 goto bail10;
1156 }
1157
1158 c2_register_device(c2dev);
1159
1160 return 0;
1161
1162 bail10:
1163 iounmap(c2dev->kva);
1164
1165 bail9:
1166 iounmap(c2dev->regs);
1167
1168 bail8:
1169 iounmap(c2dev->mmio_txp_ring);
1170
1171 bail7:
1172 iounmap(c2dev->mmio_rxp_ring);
1173
1174 bail6:
1175 unregister_netdev(netdev);
1176
1177 bail5:
1178 free_netdev(netdev);
1179
1180 bail4:
1181 free_irq(pcidev->irq, c2dev);
1182
1183 bail3:
1184 ib_dealloc_device(&c2dev->ibdev);
1185
1186 bail2:
1187 pci_release_regions(pcidev);
1188
1189 bail1:
1190 pci_disable_device(pcidev);
1191
1192 bail0:
1193 return ret;
1194}
1195
1196static void __devexit c2_remove(struct pci_dev *pcidev)
1197{
1198 struct c2_dev *c2dev = pci_get_drvdata(pcidev);
1199 struct net_device *netdev = c2dev->netdev;
1200
1201 /* Unregister with OpenIB */
1202 c2_unregister_device(c2dev);
1203
1204 /* Clean up the RNIC resources */
1205 c2_rnic_term(c2dev);
1206
1207 /* Remove network device from the kernel */
1208 unregister_netdev(netdev);
1209
1210 /* Free network device */
1211 free_netdev(netdev);
1212
1213 /* Free the interrupt line */
1214 free_irq(pcidev->irq, c2dev);
1215
1216 /* missing: Turn LEDs off here */
1217
1218 /* Unmap adapter PA space */
1219 iounmap(c2dev->kva);
1220 iounmap(c2dev->regs);
1221 iounmap(c2dev->mmio_txp_ring);
1222 iounmap(c2dev->mmio_rxp_ring);
1223
1224 /* Free the hardware structure */
1225 ib_dealloc_device(&c2dev->ibdev);
1226
1227 /* Release reserved PCI I/O and memory resources */
1228 pci_release_regions(pcidev);
1229
1230 /* Disable PCI device */
1231 pci_disable_device(pcidev);
1232
1233 /* Clear driver specific data */
1234 pci_set_drvdata(pcidev, NULL);
1235}
1236
1237static struct pci_driver c2_pci_driver = {
1238 .name = DRV_NAME,
1239 .id_table = c2_pci_table,
1240 .probe = c2_probe,
1241 .remove = __devexit_p(c2_remove),
1242};
1243
1244static int __init c2_init_module(void)
1245{
1246 return pci_module_init(&c2_pci_driver);
1247}
1248
1249static void __exit c2_exit_module(void)
1250{
1251 pci_unregister_driver(&c2_pci_driver);
1252}
1253
1254module_init(c2_init_module);
1255module_exit(c2_exit_module);
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
new file mode 100644
index 000000000000..1b17dcdd0505
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -0,0 +1,551 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef __C2_H
35#define __C2_H
36
37#include <linux/netdevice.h>
38#include <linux/spinlock.h>
39#include <linux/kernel.h>
40#include <linux/pci.h>
41#include <linux/dma-mapping.h>
42#include <linux/idr.h>
43#include <asm/semaphore.h>
44
45#include "c2_provider.h"
46#include "c2_mq.h"
47#include "c2_status.h"
48
49#define DRV_NAME "c2"
50#define DRV_VERSION "1.1"
51#define PFX DRV_NAME ": "
52
53#define BAR_0 0
54#define BAR_2 2
55#define BAR_4 4
56
57#define RX_BUF_SIZE (1536 + 8)
58#define ETH_JUMBO_MTU 9000
59#define C2_MAGIC "CEPHEUS"
60#define C2_VERSION 4
61#define C2_IVN (18 & 0x7fffffff)
62
63#define C2_REG0_SIZE (16 * 1024)
64#define C2_REG2_SIZE (2 * 1024 * 1024)
65#define C2_REG4_SIZE (256 * 1024 * 1024)
66#define C2_NUM_TX_DESC 341
67#define C2_NUM_RX_DESC 256
68#define C2_PCI_REGS_OFFSET (0x10000)
69#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
70#define C2_RXP_HRXDQ_SIZE (4096)
71#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
72#define C2_TXP_HTXDQ_SIZE (4096)
73#define C2_TX_TIMEOUT (6*HZ)
74
75/* CEPHEUS */
76static const u8 c2_magic[] = {
77 0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
78};
79
80enum adapter_pci_regs {
81 C2_REGS_MAGIC = 0x0000,
82 C2_REGS_VERS = 0x0008,
83 C2_REGS_IVN = 0x000C,
84 C2_REGS_PCI_WINSIZE = 0x0010,
85 C2_REGS_Q0_QSIZE = 0x0014,
86 C2_REGS_Q0_MSGSIZE = 0x0018,
87 C2_REGS_Q0_POOLSTART = 0x001C,
88 C2_REGS_Q0_SHARED = 0x0020,
89 C2_REGS_Q1_QSIZE = 0x0024,
90 C2_REGS_Q1_MSGSIZE = 0x0028,
91 C2_REGS_Q1_SHARED = 0x0030,
92 C2_REGS_Q2_QSIZE = 0x0034,
93 C2_REGS_Q2_MSGSIZE = 0x0038,
94 C2_REGS_Q2_SHARED = 0x0040,
95 C2_REGS_ENADDR = 0x004C,
96 C2_REGS_RDMA_ENADDR = 0x0054,
97 C2_REGS_HRX_CUR = 0x006C,
98};
99
100struct c2_adapter_pci_regs {
101 char reg_magic[8];
102 u32 version;
103 u32 ivn;
104 u32 pci_window_size;
105 u32 q0_q_size;
106 u32 q0_msg_size;
107 u32 q0_pool_start;
108 u32 q0_shared;
109 u32 q1_q_size;
110 u32 q1_msg_size;
111 u32 q1_pool_start;
112 u32 q1_shared;
113 u32 q2_q_size;
114 u32 q2_msg_size;
115 u32 q2_pool_start;
116 u32 q2_shared;
117 u32 log_start;
118 u32 log_size;
119 u8 host_enaddr[8];
120 u8 rdma_enaddr[8];
121 u32 crash_entry;
122 u32 crash_ready[2];
123 u32 fw_txd_cur;
124 u32 fw_hrxd_cur;
125 u32 fw_rxd_cur;
126};
127
128enum pci_regs {
129 C2_HISR = 0x0000,
130 C2_DISR = 0x0004,
131 C2_HIMR = 0x0008,
132 C2_DIMR = 0x000C,
133 C2_NISR0 = 0x0010,
134 C2_NISR1 = 0x0014,
135 C2_NIMR0 = 0x0018,
136 C2_NIMR1 = 0x001C,
137 C2_IDIS = 0x0020,
138};
139
140enum {
141 C2_PCI_HRX_INT = 1 << 8,
142 C2_PCI_HTX_INT = 1 << 17,
143 C2_PCI_HRX_QUI = 1 << 31,
144};
145
146/*
147 * Cepheus registers in BAR0.
148 */
149struct c2_pci_regs {
150 u32 hostisr;
151 u32 dmaisr;
152 u32 hostimr;
153 u32 dmaimr;
154 u32 netisr0;
155 u32 netisr1;
156 u32 netimr0;
157 u32 netimr1;
158 u32 int_disable;
159};
160
161/* TXP flags */
162enum c2_txp_flags {
163 TXP_HTXD_DONE = 0,
164 TXP_HTXD_READY = 1 << 0,
165 TXP_HTXD_UNINIT = 1 << 1,
166};
167
168/* RXP flags */
169enum c2_rxp_flags {
170 RXP_HRXD_UNINIT = 0,
171 RXP_HRXD_READY = 1 << 0,
172 RXP_HRXD_DONE = 1 << 1,
173};
174
175/* RXP status */
176enum c2_rxp_status {
177 RXP_HRXD_ZERO = 0,
178 RXP_HRXD_OK = 1 << 0,
179 RXP_HRXD_BUF_OV = 1 << 1,
180};
181
182/* TXP descriptor fields */
183enum txp_desc {
184 C2_TXP_FLAGS = 0x0000,
185 C2_TXP_LEN = 0x0002,
186 C2_TXP_ADDR = 0x0004,
187};
188
189/* RXP descriptor fields */
190enum rxp_desc {
191 C2_RXP_FLAGS = 0x0000,
192 C2_RXP_STATUS = 0x0002,
193 C2_RXP_COUNT = 0x0004,
194 C2_RXP_LEN = 0x0006,
195 C2_RXP_ADDR = 0x0008,
196};
197
198struct c2_txp_desc {
199 u16 flags;
200 u16 len;
201 u64 addr;
202} __attribute__ ((packed));
203
204struct c2_rxp_desc {
205 u16 flags;
206 u16 status;
207 u16 count;
208 u16 len;
209 u64 addr;
210} __attribute__ ((packed));
211
212struct c2_rxp_hdr {
213 u16 flags;
214 u16 status;
215 u16 len;
216 u16 rsvd;
217} __attribute__ ((packed));
218
219struct c2_tx_desc {
220 u32 len;
221 u32 status;
222 dma_addr_t next_offset;
223};
224
225struct c2_rx_desc {
226 u32 len;
227 u32 status;
228 dma_addr_t next_offset;
229};
230
231struct c2_alloc {
232 u32 last;
233 u32 max;
234 spinlock_t lock;
235 unsigned long *table;
236};
237
238struct c2_array {
239 struct {
240 void **page;
241 int used;
242 } *page_list;
243};
244
245/*
246 * The MQ shared pointer pool is organized as a linked list of
247 * chunks. Each chunk contains a linked list of free shared pointers
248 * that can be allocated to a given user mode client.
249 *
250 */
251struct sp_chunk {
252 struct sp_chunk *next;
253 dma_addr_t dma_addr;
254 DECLARE_PCI_UNMAP_ADDR(mapping);
255 u16 head;
256 u16 shared_ptr[0];
257};
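/*
 * shared_ptr[] doubles as the free list: each free slot holds the index of
 * the next free slot, 'head' names the first free index, and 0xFFFF
 * terminates the list. Allocation (c2_alloc_mqsp) pops the head; freeing
 * (c2_free_mqsp) pushes the slot back onto the head.
 */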
258
259struct c2_pd_table {
260 u32 last;
261 u32 max;
262 spinlock_t lock;
263 unsigned long *table;
264};
265
266struct c2_qp_table {
267 struct idr idr;
268 spinlock_t lock;
269 int last;
270};
271
272struct c2_element {
273 struct c2_element *next;
274 void *ht_desc; /* host descriptor */
275 void __iomem *hw_desc; /* hardware descriptor */
276 struct sk_buff *skb;
277 dma_addr_t mapaddr;
278 u32 maplen;
279};
280
281struct c2_ring {
282 struct c2_element *to_clean;
283 struct c2_element *to_use;
284 struct c2_element *start;
285 unsigned long count;
286};
287
288struct c2_dev {
289 struct ib_device ibdev;
290 void __iomem *regs;
291 void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
292 void __iomem *mmio_rxp_ring;
293 spinlock_t lock;
294 struct pci_dev *pcidev;
295 struct net_device *netdev;
296 struct net_device *pseudo_netdev;
297 unsigned int cur_tx;
298 unsigned int cur_rx;
299 u32 adapter_handle;
300 int device_cap_flags;
301 void __iomem *kva; /* KVA device memory */
302 unsigned long pa; /* PA device memory */
303 void **qptr_array;
304
305 kmem_cache_t *host_msg_cache;
306
307 struct list_head cca_link; /* adapter list */
308 struct list_head eh_wakeup_list; /* event wakeup list */
309 wait_queue_head_t req_vq_wo;
310
311 /* Cached RNIC properties */
312 struct ib_device_attr props;
313
314 struct c2_pd_table pd_table;
315 struct c2_qp_table qp_table;
316 int ports; /* num of GigE ports */
317 int devnum;
318 spinlock_t vqlock; /* sync vbs req MQ */
319
320 /* Verbs Queues */
321 struct c2_mq req_vq; /* Verbs Request MQ */
322 struct c2_mq rep_vq; /* Verbs Reply MQ */
323 struct c2_mq aeq; /* Async Events MQ */
324
325 /* Kernel client MQs */
326 struct sp_chunk *kern_mqsp_pool;
327
328 /* Device updates these values when posting messages to a host
329 * target queue */
330 u16 req_vq_shared;
331 u16 rep_vq_shared;
332 u16 aeq_shared;
333 u16 irq_claimed;
334
335 /*
336 * Shared host target pages for user-accessible MQs.
337 */
338 int hthead; /* index of first free entry */
339 void *htpages; /* kernel vaddr */
340 int htlen; /* length of htpages memory */
341 void *htuva; /* user mapped vaddr */
342 spinlock_t htlock; /* serialize allocation */
343
344 u64 adapter_hint_uva; /* access to the activity FIFO */
345
346 // spinlock_t aeq_lock;
347 // spinlock_t rnic_lock;
348
349 u16 *hint_count;
350 dma_addr_t hint_count_dma;
351 u16 hints_read;
352
353 int init; /* TRUE if it's ready */
354 char ae_cache_name[16];
355 char vq_cache_name[16];
356};
357
358struct c2_port {
359 u32 msg_enable;
360 struct c2_dev *c2dev;
361 struct net_device *netdev;
362
363 spinlock_t tx_lock;
364 u32 tx_avail;
365 struct c2_ring tx_ring;
366 struct c2_ring rx_ring;
367
368 void *mem; /* PCI memory for host rings */
369 dma_addr_t dma;
370 unsigned long mem_size;
371
372 u32 rx_buf_size;
373
374 struct net_device_stats netstats;
375};
376
377/*
378 * Activity FIFO registers in BAR0.
379 */
380#define PCI_BAR0_HOST_HINT 0x100
381#define PCI_BAR0_ADAPTER_HINT 0x2000
382
383/*
384 * Completion queue arming flags.
385 */
386#define CQ_ARMED 0x01
387#define CQ_WAIT_FOR_DMA 0x80
388
389/*
390 * The format of a hint is as follows:
391 * Lower 16 bits are the count of hints for the queue.
392 * Next 15 bits are the qp_index
393 * Upper most bit depends on who reads it:
394 * If read by producer, then it means Full (1) or Not-Full (0)
395 * If read by consumer, then it means Empty (1) or Not-Empty (0)
396 */
397#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
398#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
399#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
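/*
 * Worked example: for qp_index 5 with 3 outstanding hints,
 * C2_HINT_MAKE(5, 3) yields 0x00050003; C2_HINT_GET_INDEX(0x00050003)
 * recovers 5 and C2_HINT_GET_COUNT(0x00050003) recovers 3. The
 * full/empty bit (bit 31) is masked off by C2_HINT_GET_INDEX.
 */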
400
401
402/*
403 * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t
404 * struct.
405 */
406#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
407
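/*
 * Fallbacks for platforms without native 64-bit MMIO accessors: a 64-bit
 * access is composed from two 32-bit accesses, low word at addr and high
 * word at addr + 4.
 */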
408#ifndef readq
409static inline u64 readq(const void __iomem * addr)
410{
411 u64 ret = readl(addr + 4);
412 ret <<= 32;
413 ret |= readl(addr);
414
415 return ret;
416}
417#endif
418
419#ifndef writeq
420static inline void __raw_writeq(u64 val, void __iomem * addr)
421{
422 __raw_writel((u32) (val), addr);
423 __raw_writel((u32) (val >> 32), (addr + 4));
424}
425#endif
426
427#define C2_SET_CUR_RX(c2dev, cur_rx) \
428 __raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
429
430#define C2_GET_CUR_RX(c2dev) \
431 be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
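/*
 * The current RX index lives in the last four bytes (offset 4092) of the
 * 4 KB HTXDQ window; c2_probe() seeds it with C2_SET_CUR_RX() after
 * mapping mmio_txp_ring, and c2_reset() sets the top bit (C2_PCI_HRX_QUI)
 * here to quiesce the RXP.
 */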
432
433static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
434{
435 return container_of(ibdev, struct c2_dev, ibdev);
436}
437
438static inline int c2_errno(void *reply)
439{
440 switch (c2_wr_get_result(reply)) {
441 case C2_OK:
442 return 0;
443 case CCERR_NO_BUFS:
444 case CCERR_INSUFFICIENT_RESOURCES:
445 case CCERR_ZERO_RDMA_READ_RESOURCES:
446 return -ENOMEM;
447 case CCERR_MR_IN_USE:
448 case CCERR_QP_IN_USE:
449 return -EBUSY;
450 case CCERR_ADDR_IN_USE:
451 return -EADDRINUSE;
452 case CCERR_ADDR_NOT_AVAIL:
453 return -EADDRNOTAVAIL;
454 case CCERR_CONN_RESET:
455 return -ECONNRESET;
456 case CCERR_NOT_IMPLEMENTED:
457 case CCERR_INVALID_WQE:
458 return -ENOSYS;
459 case CCERR_QP_NOT_PRIVILEGED:
460 return -EPERM;
461 case CCERR_STACK_ERROR:
462 return -EPROTO;
463 case CCERR_ACCESS_VIOLATION:
464 case CCERR_BASE_AND_BOUNDS_VIOLATION:
465 return -EFAULT;
466 case CCERR_STAG_STATE_NOT_INVALID:
467 case CCERR_INVALID_ADDRESS:
468 case CCERR_INVALID_CQ:
469 case CCERR_INVALID_EP:
470 case CCERR_INVALID_MODIFIER:
471 case CCERR_INVALID_MTU:
472 case CCERR_INVALID_PD_ID:
473 case CCERR_INVALID_QP:
474 case CCERR_INVALID_RNIC:
475 case CCERR_INVALID_STAG:
476 return -EINVAL;
477 default:
478 return -EAGAIN;
479 }
480}
481
482/* Device */
483extern int c2_register_device(struct c2_dev *c2dev);
484extern void c2_unregister_device(struct c2_dev *c2dev);
485extern int c2_rnic_init(struct c2_dev *c2dev);
486extern void c2_rnic_term(struct c2_dev *c2dev);
487extern void c2_rnic_interrupt(struct c2_dev *c2dev);
488extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
489extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
490
491/* QPs */
492extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
493 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
494extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
495extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
496extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
497 struct ib_qp_attr *attr, int attr_mask);
498extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
499 int ord, int ird);
500extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
501 struct ib_send_wr **bad_wr);
502extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
503 struct ib_recv_wr **bad_wr);
504extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
505extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
506extern void c2_set_qp_state(struct c2_qp *, int);
507extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
508
509/* PDs */
510extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
511extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
512extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
513extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);
514
515/* CQs */
516extern int c2_init_cq(struct c2_dev *c2dev, int entries,
517 struct c2_ucontext *ctx, struct c2_cq *cq);
518extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
519extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
520extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
521extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
522extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
523
524/* CM */
525extern int c2_llp_connect(struct iw_cm_id *cm_id,
526 struct iw_cm_conn_param *iw_param);
527extern int c2_llp_accept(struct iw_cm_id *cm_id,
528 struct iw_cm_conn_param *iw_param);
529extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
530 u8 pdata_len);
531extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
532extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
533
534/* MM */
535extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
536 int page_size, int pbl_depth, u32 length,
537 u32 off, u64 *va, enum c2_acf acf,
538 struct c2_mr *mr);
539extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
540
541/* AE */
542extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
543
544/* MQSP Allocator */
545extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
546 struct sp_chunk **root);
547extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
548extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
549 dma_addr_t *dma_addr, gfp_t gfp_mask);
550extern void c2_free_mqsp(u16 * mqsp);
551#endif
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
new file mode 100644
index 000000000000..08f46c83a3a4
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -0,0 +1,321 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include <rdma/iw_cm.h>
35#include "c2_status.h"
36#include "c2_ae.h"
37
38static int c2_convert_cm_status(u32 c2_status)
39{
40 switch (c2_status) {
41 case C2_CONN_STATUS_SUCCESS:
42 return 0;
43 case C2_CONN_STATUS_REJECTED:
44 return -ENETRESET;
45 case C2_CONN_STATUS_REFUSED:
46 return -ECONNREFUSED;
47 case C2_CONN_STATUS_TIMEDOUT:
48 return -ETIMEDOUT;
49 case C2_CONN_STATUS_NETUNREACH:
50 return -ENETUNREACH;
51 case C2_CONN_STATUS_HOSTUNREACH:
52 return -EHOSTUNREACH;
53 case C2_CONN_STATUS_INVALID_RNIC:
54 return -EINVAL;
55 case C2_CONN_STATUS_INVALID_QP:
56 return -EINVAL;
57 case C2_CONN_STATUS_INVALID_QP_STATE:
58 return -EINVAL;
59 case C2_CONN_STATUS_ADDR_NOT_AVAIL:
60 return -EADDRNOTAVAIL;
61 default:
62 printk(KERN_ERR PFX
63 "%s - Unable to convert CM status: %d\n",
64 __FUNCTION__, c2_status);
65 return -EIO;
66 }
67}
68
69#ifdef DEBUG
70static const char* to_event_str(int event)
71{
72 static const char* event_str[] = {
73 "CCAE_REMOTE_SHUTDOWN",
74 "CCAE_ACTIVE_CONNECT_RESULTS",
75 "CCAE_CONNECTION_REQUEST",
76 "CCAE_LLP_CLOSE_COMPLETE",
77 "CCAE_TERMINATE_MESSAGE_RECEIVED",
78 "CCAE_LLP_CONNECTION_RESET",
79 "CCAE_LLP_CONNECTION_LOST",
80 "CCAE_LLP_SEGMENT_SIZE_INVALID",
81 "CCAE_LLP_INVALID_CRC",
82 "CCAE_LLP_BAD_FPDU",
83 "CCAE_INVALID_DDP_VERSION",
84 "CCAE_INVALID_RDMA_VERSION",
85 "CCAE_UNEXPECTED_OPCODE",
86 "CCAE_INVALID_DDP_QUEUE_NUMBER",
87 "CCAE_RDMA_READ_NOT_ENABLED",
88 "CCAE_RDMA_WRITE_NOT_ENABLED",
89 "CCAE_RDMA_READ_TOO_SMALL",
90 "CCAE_NO_L_BIT",
91 "CCAE_TAGGED_INVALID_STAG",
92 "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
93 "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
94 "CCAE_TAGGED_INVALID_PD",
95 "CCAE_WRAP_ERROR",
96 "CCAE_BAD_CLOSE",
97 "CCAE_BAD_LLP_CLOSE",
98 "CCAE_INVALID_MSN_RANGE",
99 "CCAE_INVALID_MSN_GAP",
100 "CCAE_IRRQ_OVERFLOW",
101 "CCAE_IRRQ_MSN_GAP",
102 "CCAE_IRRQ_MSN_RANGE",
103 "CCAE_IRRQ_INVALID_STAG",
104 "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
105 "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
106 "CCAE_IRRQ_INVALID_PD",
107 "CCAE_IRRQ_WRAP_ERROR",
108 "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
109 "CCAE_CQ_RQ_COMPLETION_ERROR",
110 "CCAE_QP_SRQ_WQE_ERROR",
111 "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
112 "CCAE_CQ_OVERFLOW",
113 "CCAE_CQ_OPERATION_ERROR",
114 "CCAE_SRQ_LIMIT_REACHED",
115 "CCAE_QP_RQ_LIMIT_REACHED",
116 "CCAE_SRQ_CATASTROPHIC_ERROR",
117 "CCAE_RNIC_CATASTROPHIC_ERROR"
118 };
119
120 if (event < CCAE_REMOTE_SHUTDOWN ||
121 event > CCAE_RNIC_CATASTROPHIC_ERROR)
122 return "<invalid event>";
123
124 event -= CCAE_REMOTE_SHUTDOWN;
125 return event_str[event];
126}
127
128static const char *to_qp_state_str(int state)
129{
130 switch (state) {
131 case C2_QP_STATE_IDLE:
132 return "C2_QP_STATE_IDLE";
133 case C2_QP_STATE_CONNECTING:
134 return "C2_QP_STATE_CONNECTING";
135 case C2_QP_STATE_RTS:
136 return "C2_QP_STATE_RTS";
137 case C2_QP_STATE_CLOSING:
138 return "C2_QP_STATE_CLOSING";
139 case C2_QP_STATE_TERMINATE:
140 return "C2_QP_STATE_TERMINATE";
141 case C2_QP_STATE_ERROR:
142 return "C2_QP_STATE_ERROR";
143 default:
144 return "<invalid QP state>";
145 	}
146}
147#endif
148
149void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
150{
151 struct c2_mq *mq = c2dev->qptr_array[mq_index];
152 union c2wr *wr;
153 void *resource_user_context;
154 struct iw_cm_event cm_event;
155 struct ib_event ib_event;
156 enum c2_resource_indicator resource_indicator;
157 enum c2_event_id event_id;
158 unsigned long flags;
159 int status;
160
161 /*
162 	 * retrieve the message
163 */
164 wr = c2_mq_consume(mq);
165 if (!wr)
166 return;
167
168 memset(&ib_event, 0, sizeof(ib_event));
169 memset(&cm_event, 0, sizeof(cm_event));
170
171 event_id = c2_wr_get_id(wr);
172 resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
173 resource_user_context =
174 (void *) (unsigned long) wr->ae.ae_generic.user_context;
175
176 status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
177
178 pr_debug("event received c2_dev=%p, event_id=%d, "
179 "resource_indicator=%d, user_context=%p, status = %d\n",
180 c2dev, event_id, resource_indicator, resource_user_context,
181 status);
182
183 switch (resource_indicator) {
184 case C2_RES_IND_QP:{
185
186 struct c2_qp *qp = (struct c2_qp *)resource_user_context;
187 struct iw_cm_id *cm_id = qp->cm_id;
188 struct c2wr_ae_active_connect_results *res;
189
190 if (!cm_id) {
191 pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
192 qp);
193 goto ignore_it;
194 }
195 pr_debug("%s: event = %s, user_context=%llx, "
196 "resource_type=%x, "
197 "resource=%x, qp_state=%s\n",
198 __FUNCTION__,
199 to_event_str(event_id),
200 be64_to_cpu(wr->ae.ae_generic.user_context),
201 be32_to_cpu(wr->ae.ae_generic.resource_type),
202 be32_to_cpu(wr->ae.ae_generic.resource),
203 to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
204
205 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
206
207 switch (event_id) {
208 case CCAE_ACTIVE_CONNECT_RESULTS:
209 res = &wr->ae.ae_active_connect_results;
210 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
211 cm_event.local_addr.sin_addr.s_addr = res->laddr;
212 cm_event.remote_addr.sin_addr.s_addr = res->raddr;
213 cm_event.local_addr.sin_port = res->lport;
214 cm_event.remote_addr.sin_port = res->rport;
215 if (status == 0) {
216 cm_event.private_data_len =
217 be32_to_cpu(res->private_data_length);
218 cm_event.private_data = res->private_data;
219 } else {
220 spin_lock_irqsave(&qp->lock, flags);
221 if (qp->cm_id) {
222 qp->cm_id->rem_ref(qp->cm_id);
223 qp->cm_id = NULL;
224 }
225 spin_unlock_irqrestore(&qp->lock, flags);
226 cm_event.private_data_len = 0;
227 cm_event.private_data = NULL;
228 }
229 if (cm_id->event_handler)
230 cm_id->event_handler(cm_id, &cm_event);
231 break;
232 case CCAE_TERMINATE_MESSAGE_RECEIVED:
233 case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
234 ib_event.device = &c2dev->ibdev;
235 ib_event.element.qp = &qp->ibqp;
236 ib_event.event = IB_EVENT_QP_REQ_ERR;
237
238 if (qp->ibqp.event_handler)
239 qp->ibqp.event_handler(&ib_event,
240 qp->ibqp.
241 qp_context);
242 break;
243 case CCAE_BAD_CLOSE:
244 case CCAE_LLP_CLOSE_COMPLETE:
245 case CCAE_LLP_CONNECTION_RESET:
246 case CCAE_LLP_CONNECTION_LOST:
247 BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
248
249 spin_lock_irqsave(&qp->lock, flags);
250 if (qp->cm_id) {
251 qp->cm_id->rem_ref(qp->cm_id);
252 qp->cm_id = NULL;
253 }
254 spin_unlock_irqrestore(&qp->lock, flags);
255 cm_event.event = IW_CM_EVENT_CLOSE;
256 cm_event.status = 0;
257 if (cm_id->event_handler)
258 cm_id->event_handler(cm_id, &cm_event);
259 break;
260 default:
261 BUG_ON(1);
262 pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
263 "CM_ID=%p\n",
264 __FUNCTION__, __LINE__,
265 event_id, qp, cm_id);
266 break;
267 }
268 break;
269 }
270
271 case C2_RES_IND_EP:{
272
273 struct c2wr_ae_connection_request *req =
274 &wr->ae.ae_connection_request;
275 struct iw_cm_id *cm_id =
276 (struct iw_cm_id *)resource_user_context;
277
278 pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
279 if (event_id != CCAE_CONNECTION_REQUEST) {
280 pr_debug("%s: Invalid event_id: %d\n",
281 __FUNCTION__, event_id);
282 break;
283 }
284 cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
285 cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
286 cm_event.local_addr.sin_addr.s_addr = req->laddr;
287 cm_event.remote_addr.sin_addr.s_addr = req->raddr;
288 cm_event.local_addr.sin_port = req->lport;
289 cm_event.remote_addr.sin_port = req->rport;
290 cm_event.private_data_len =
291 be32_to_cpu(req->private_data_length);
292 cm_event.private_data = req->private_data;
293
294 if (cm_id->event_handler)
295 cm_id->event_handler(cm_id, &cm_event);
296 break;
297 }
298
299 case C2_RES_IND_CQ:{
300 struct c2_cq *cq =
301 (struct c2_cq *) resource_user_context;
302
303 pr_debug("IB_EVENT_CQ_ERR\n");
304 ib_event.device = &c2dev->ibdev;
305 ib_event.element.cq = &cq->ibcq;
306 ib_event.event = IB_EVENT_CQ_ERR;
307
308 if (cq->ibcq.event_handler)
309 cq->ibcq.event_handler(&ib_event,
310 cq->ibcq.cq_context);
311 			break;
312 		}
313 default:
314 printk("Bad resource indicator = %d\n",
315 resource_indicator);
316 break;
317 }
318
319 ignore_it:
320 c2_mq_free(mq);
321}
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.h b/drivers/infiniband/hw/amso1100/c2_ae.h
new file mode 100644
index 000000000000..3a065c33b83b
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_ae.h
@@ -0,0 +1,108 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_AE_H_
34#define _C2_AE_H_
35
36/*
37 * WARNING: If you change this file, also bump C2_IVN_BASE
38 * in common/include/clustercore/c2_ivn.h.
39 */
40
41/*
42 * Asynchronous Event Identifiers
43 *
44 * These start at 0x80 only so it's obvious from inspection that
45 * they are not work-request statuses. This isn't critical.
46 *
47 * NOTE: these event id's must fit in eight bits.
48 */
49enum c2_event_id {
50 CCAE_REMOTE_SHUTDOWN = 0x80,
51 CCAE_ACTIVE_CONNECT_RESULTS,
52 CCAE_CONNECTION_REQUEST,
53 CCAE_LLP_CLOSE_COMPLETE,
54 CCAE_TERMINATE_MESSAGE_RECEIVED,
55 CCAE_LLP_CONNECTION_RESET,
56 CCAE_LLP_CONNECTION_LOST,
57 CCAE_LLP_SEGMENT_SIZE_INVALID,
58 CCAE_LLP_INVALID_CRC,
59 CCAE_LLP_BAD_FPDU,
60 CCAE_INVALID_DDP_VERSION,
61 CCAE_INVALID_RDMA_VERSION,
62 CCAE_UNEXPECTED_OPCODE,
63 CCAE_INVALID_DDP_QUEUE_NUMBER,
64 CCAE_RDMA_READ_NOT_ENABLED,
65 CCAE_RDMA_WRITE_NOT_ENABLED,
66 CCAE_RDMA_READ_TOO_SMALL,
67 CCAE_NO_L_BIT,
68 CCAE_TAGGED_INVALID_STAG,
69 CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
70 CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
71 CCAE_TAGGED_INVALID_PD,
72 CCAE_WRAP_ERROR,
73 CCAE_BAD_CLOSE,
74 CCAE_BAD_LLP_CLOSE,
75 CCAE_INVALID_MSN_RANGE,
76 CCAE_INVALID_MSN_GAP,
77 CCAE_IRRQ_OVERFLOW,
78 CCAE_IRRQ_MSN_GAP,
79 CCAE_IRRQ_MSN_RANGE,
80 CCAE_IRRQ_INVALID_STAG,
81 CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
82 CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
83 CCAE_IRRQ_INVALID_PD,
84 CCAE_IRRQ_WRAP_ERROR,
85 CCAE_CQ_SQ_COMPLETION_OVERFLOW,
86 CCAE_CQ_RQ_COMPLETION_ERROR,
87 CCAE_QP_SRQ_WQE_ERROR,
88 CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
89 CCAE_CQ_OVERFLOW,
90 CCAE_CQ_OPERATION_ERROR,
91 CCAE_SRQ_LIMIT_REACHED,
92 CCAE_QP_RQ_LIMIT_REACHED,
93 CCAE_SRQ_CATASTROPHIC_ERROR,
94 CCAE_RNIC_CATASTROPHIC_ERROR
95/* WARNING If you add more id's, make sure their values fit in eight bits. */
96};
97
98/*
99 * Resource Indicators and Identifiers
100 */
101enum c2_resource_indicator {
102 C2_RES_IND_QP = 1,
103 C2_RES_IND_EP,
104 C2_RES_IND_CQ,
105 C2_RES_IND_SRQ,
106};
107
108#endif /* _C2_AE_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
new file mode 100644
index 000000000000..1d2529992c0c
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/errno.h>
35#include <linux/slab.h>
36#include <linux/bitmap.h>
37
38#include "c2.h"
39
40static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
41 struct sp_chunk **head)
42{
43 int i;
44 struct sp_chunk *new_head;
45
46 new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
47 if (new_head == NULL)
48 return -ENOMEM;
49
50 new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
51 PAGE_SIZE, DMA_FROM_DEVICE);
52 pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
53
54 new_head->next = NULL;
55 new_head->head = 0;
56
57 /* build list where each index is the next free slot */
58 for (i = 0;
59 i < (PAGE_SIZE - sizeof(struct sp_chunk) -
60 sizeof(u16)) / sizeof(u16) - 1;
61 i++) {
62 new_head->shared_ptr[i] = i + 1;
63 }
64 /* terminate list */
65 new_head->shared_ptr[i] = 0xFFFF;
66
67 *head = new_head;
68 return 0;
69}
70
71int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
72 struct sp_chunk **root)
73{
74 return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
75}
76
77void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
78{
79 struct sp_chunk *next;
80
81 while (root) {
82 next = root->next;
83 dma_unmap_single(c2dev->ibdev.dma_device,
84 pci_unmap_addr(root, mapping), PAGE_SIZE,
85 DMA_FROM_DEVICE);
86 __free_page((struct page *) root);
87 root = next;
88 }
89}
90
91u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
92 dma_addr_t *dma_addr, gfp_t gfp_mask)
93{
94 u16 mqsp;
95
96 while (head) {
97 mqsp = head->head;
98 if (mqsp != 0xFFFF) {
99 head->head = head->shared_ptr[mqsp];
100 break;
101 } else if (head->next == NULL) {
102 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
103 0) {
104 head = head->next;
105 mqsp = head->head;
106 head->head = head->shared_ptr[mqsp];
107 break;
108 } else
109 return NULL;
110 } else
111 head = head->next;
112 }
113 if (head) {
114 *dma_addr = head->dma_addr +
115 ((unsigned long) &(head->shared_ptr[mqsp]) -
116 (unsigned long) head);
117 pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
118 &(head->shared_ptr[mqsp]), (u64)*dma_addr);
119 return &(head->shared_ptr[mqsp]);
120 }
121 return NULL;
122}
123
124void c2_free_mqsp(u16 * mqsp)
125{
126 struct sp_chunk *head;
127 u16 idx;
128
129 /* The chunk containing this ptr begins at the page boundary */
130 head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
131
132 /* Link head to new mqsp */
133 *mqsp = head->head;
134
135 	/* Compute the shared_ptr index: the u16 offset of mqsp within the
	 * page, minus the offset of shared_ptr[] in struct sp_chunk */
136 idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
137 idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
138
139 /* Point this index at the head */
140 head->shared_ptr[idx] = head->head;
141
142 /* Point head at this index */
143 head->head = idx;
144}
diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/infiniband/hw/amso1100/c2_cm.c
new file mode 100644
index 000000000000..485254efdd1e
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_cm.c
@@ -0,0 +1,452 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34#include "c2.h"
35#include "c2_wr.h"
36#include "c2_vq.h"
37#include <rdma/iw_cm.h>
38
39int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
40{
41 struct c2_dev *c2dev = to_c2dev(cm_id->device);
42 struct ib_qp *ibqp;
43 struct c2_qp *qp;
44 struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
45 struct c2_vq_req *vq_req;
46 int err;
47
48 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
49 if (!ibqp)
50 return -EINVAL;
51 qp = to_c2qp(ibqp);
52
53 /* Associate QP <--> CM_ID */
54 cm_id->provider_data = qp;
55 cm_id->add_ref(cm_id);
56 qp->cm_id = cm_id;
57
58 /*
59 * only support the max private_data length
60 */
61 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
62 err = -EINVAL;
63 goto bail0;
64 }
65 /*
66 * Set the rdma read limits
67 */
68 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
69 if (err)
70 goto bail0;
71
72 /*
73 * Create and send a WR_QP_CONNECT...
74 */
75 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
76 if (!wr) {
77 err = -ENOMEM;
78 goto bail0;
79 }
80
81 vq_req = vq_req_alloc(c2dev);
82 if (!vq_req) {
83 err = -ENOMEM;
84 goto bail1;
85 }
86
87 c2_wr_set_id(wr, CCWR_QP_CONNECT);
88 wr->hdr.context = 0;
89 wr->rnic_handle = c2dev->adapter_handle;
90 wr->qp_handle = qp->adapter_handle;
91
92 wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
93 wr->remote_port = cm_id->remote_addr.sin_port;
94
95 /*
96 	 * Move any private data from the caller's buf into
97 * the WR.
98 */
99 if (iw_param->private_data) {
100 wr->private_data_length =
101 cpu_to_be32(iw_param->private_data_len);
102 memcpy(&wr->private_data[0], iw_param->private_data,
103 iw_param->private_data_len);
104 } else
105 wr->private_data_length = 0;
106
107 /*
108 * Send WR to adapter. NOTE: There is no synch reply from
109 * the adapter.
110 */
111 err = vq_send_wr(c2dev, (union c2wr *) wr);
112 vq_req_free(c2dev, vq_req);
113
114 bail1:
115 kfree(wr);
116 bail0:
117 if (err) {
118 /*
119 * If we fail, release reference on QP and
120 * disassociate QP from CM_ID
121 */
122 cm_id->provider_data = NULL;
123 qp->cm_id = NULL;
124 cm_id->rem_ref(cm_id);
125 }
126 return err;
127}
128
129int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
130{
131 struct c2_dev *c2dev;
132 struct c2wr_ep_listen_create_req wr;
133 struct c2wr_ep_listen_create_rep *reply;
134 struct c2_vq_req *vq_req;
135 int err;
136
137 c2dev = to_c2dev(cm_id->device);
138 if (c2dev == NULL)
139 return -EINVAL;
140
141 /*
142 * Allocate verbs request.
143 */
144 vq_req = vq_req_alloc(c2dev);
145 if (!vq_req)
146 return -ENOMEM;
147
148 /*
149 * Build the WR
150 */
151 c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
152 wr.hdr.context = (u64) (unsigned long) vq_req;
153 wr.rnic_handle = c2dev->adapter_handle;
154 wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
155 wr.local_port = cm_id->local_addr.sin_port;
156 wr.backlog = cpu_to_be32(backlog);
157 wr.user_context = (u64) (unsigned long) cm_id;
158
159 /*
160 * Reference the request struct. Dereferenced in the int handler.
161 */
162 vq_req_get(c2dev, vq_req);
163
164 /*
165 * Send WR to adapter
166 */
167 err = vq_send_wr(c2dev, (union c2wr *) & wr);
168 if (err) {
169 vq_req_put(c2dev, vq_req);
170 goto bail0;
171 }
172
173 /*
174 * Wait for reply from adapter
175 */
176 err = vq_wait_for_reply(c2dev, vq_req);
177 if (err)
178 goto bail0;
179
180 /*
181 * Process reply
182 */
183 reply =
184 (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
185 if (!reply) {
186 err = -ENOMEM;
187 goto bail1;
188 }
189
190 if ((err = c2_errno(reply)) != 0)
191 goto bail1;
192
193 /*
194 * Keep the adapter handle. Used in subsequent destroy
195 */
196 cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
197
198 /*
199 * free vq stuff
200 */
201 vq_repbuf_free(c2dev, reply);
202 vq_req_free(c2dev, vq_req);
203
204 return 0;
205
206 bail1:
207 vq_repbuf_free(c2dev, reply);
208 bail0:
209 vq_req_free(c2dev, vq_req);
210 return err;
211}
212
213
214int c2_llp_service_destroy(struct iw_cm_id *cm_id)
215{
216
217 struct c2_dev *c2dev;
218 struct c2wr_ep_listen_destroy_req wr;
219 struct c2wr_ep_listen_destroy_rep *reply;
220 struct c2_vq_req *vq_req;
221 int err;
222
223 c2dev = to_c2dev(cm_id->device);
224 if (c2dev == NULL)
225 return -EINVAL;
226
227 /*
228 * Allocate verbs request.
229 */
230 vq_req = vq_req_alloc(c2dev);
231 if (!vq_req)
232 return -ENOMEM;
233
234 /*
235 * Build the WR
236 */
237 c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
238 wr.hdr.context = (unsigned long) vq_req;
239 wr.rnic_handle = c2dev->adapter_handle;
240 wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
241
242 /*
243 * reference the request struct. dereferenced in the int handler.
244 */
245 vq_req_get(c2dev, vq_req);
246
247 /*
248 * Send WR to adapter
249 */
250 err = vq_send_wr(c2dev, (union c2wr *) & wr);
251 if (err) {
252 vq_req_put(c2dev, vq_req);
253 goto bail0;
254 }
255
256 /*
257 * Wait for reply from adapter
258 */
259 err = vq_wait_for_reply(c2dev, vq_req);
260 if (err)
261 goto bail0;
262
263 /*
264 * Process reply
265 */
266 reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
267 if (!reply) {
268 err = -ENOMEM;
269 goto bail0;
270 }
271 if ((err = c2_errno(reply)) != 0)
272 goto bail1;
273
274 bail1:
275 vq_repbuf_free(c2dev, reply);
276 bail0:
277 vq_req_free(c2dev, vq_req);
278 return err;
279}
280
281int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
282{
283 struct c2_dev *c2dev = to_c2dev(cm_id->device);
284 struct c2_qp *qp;
285 struct ib_qp *ibqp;
286 struct c2wr_cr_accept_req *wr; /* variable length WR */
287 struct c2_vq_req *vq_req;
288 struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
289 int err;
290
291 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
292 if (!ibqp)
293 return -EINVAL;
294 qp = to_c2qp(ibqp);
295
296 /* Set the RDMA read limits */
297 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
298 if (err)
299 goto bail0;
300
301 /* Allocate verbs request. */
302 vq_req = vq_req_alloc(c2dev);
303 if (!vq_req) {
304 err = -ENOMEM;
305 goto bail1;
306 }
307 vq_req->qp = qp;
308 vq_req->cm_id = cm_id;
309 vq_req->event = IW_CM_EVENT_ESTABLISHED;
310
311 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
312 if (!wr) {
313 err = -ENOMEM;
314 goto bail2;
315 }
316
317 /* Build the WR */
318 c2_wr_set_id(wr, CCWR_CR_ACCEPT);
319 wr->hdr.context = (unsigned long) vq_req;
320 wr->rnic_handle = c2dev->adapter_handle;
321 wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
322 wr->qp_handle = qp->adapter_handle;
323
324 /* Replace the cr_handle with the QP after accept */
325 cm_id->provider_data = qp;
326 cm_id->add_ref(cm_id);
327 qp->cm_id = cm_id;
328
329 cm_id->provider_data = qp;
330
331 /* Validate private_data length */
332 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
333 err = -EINVAL;
334 goto bail2;
335 }
336
337 if (iw_param->private_data) {
338 wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
339 memcpy(&wr->private_data[0],
340 iw_param->private_data, iw_param->private_data_len);
341 } else
342 wr->private_data_length = 0;
343
344 /* Reference the request struct. Dereferenced in the int handler. */
345 vq_req_get(c2dev, vq_req);
346
347 /* Send WR to adapter */
348 err = vq_send_wr(c2dev, (union c2wr *) wr);
349 if (err) {
350 vq_req_put(c2dev, vq_req);
351 goto bail2;
352 }
353
354 /* Wait for reply from adapter */
355 err = vq_wait_for_reply(c2dev, vq_req);
356 if (err)
357 goto bail2;
358
359 /* Check that reply is present */
360 reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
361 if (!reply) {
362 err = -ENOMEM;
363 goto bail2;
364 }
365
366 err = c2_errno(reply);
367 vq_repbuf_free(c2dev, reply);
368
369 if (!err)
370 c2_set_qp_state(qp, C2_QP_STATE_RTS);
371 bail2:
372 kfree(wr);
373 bail1:
374 vq_req_free(c2dev, vq_req);
375 bail0:
376 if (err) {
377 /*
378 * If we fail, release reference on QP and
379 * disassociate QP from CM_ID
380 */
381 cm_id->provider_data = NULL;
382 qp->cm_id = NULL;
383 cm_id->rem_ref(cm_id);
384 }
385 return err;
386}
387
388int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
389{
390 struct c2_dev *c2dev;
391 struct c2wr_cr_reject_req wr;
392 struct c2_vq_req *vq_req;
393 struct c2wr_cr_reject_rep *reply;
394 int err;
395
396 c2dev = to_c2dev(cm_id->device);
397
398 /*
399 * Allocate verbs request.
400 */
401 vq_req = vq_req_alloc(c2dev);
402 if (!vq_req)
403 return -ENOMEM;
404
405 /*
406 * Build the WR
407 */
408 c2_wr_set_id(&wr, CCWR_CR_REJECT);
409 wr.hdr.context = (unsigned long) vq_req;
410 wr.rnic_handle = c2dev->adapter_handle;
411 wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
412
413 /*
414 * reference the request struct. dereferenced in the int handler.
415 */
416 vq_req_get(c2dev, vq_req);
417
418 /*
419 * Send WR to adapter
420 */
421 err = vq_send_wr(c2dev, (union c2wr *) & wr);
422 if (err) {
423 vq_req_put(c2dev, vq_req);
424 goto bail0;
425 }
426
427 /*
428 * Wait for reply from adapter
429 */
430 err = vq_wait_for_reply(c2dev, vq_req);
431 if (err)
432 goto bail0;
433
434 /*
435 * Process reply
436 */
437 reply = (struct c2wr_cr_reject_rep *) (unsigned long)
438 vq_req->reply_msg;
439 if (!reply) {
440 err = -ENOMEM;
441 goto bail0;
442 }
443 err = c2_errno(reply);
444 /*
445 * free vq stuff
446 */
447 vq_repbuf_free(c2dev, reply);
448
449 bail0:
450 vq_req_free(c2dev, vq_req);
451 return err;
452}
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
new file mode 100644
index 000000000000..9d7bcc5ade93
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -0,0 +1,433 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
8 *
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
14 *
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
18 *
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
22 *
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 *
37 */
38#include "c2.h"
39#include "c2_vq.h"
40#include "c2_status.h"
41
42#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
43
44static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
45{
46 struct c2_cq *cq;
47 unsigned long flags;
48
49 spin_lock_irqsave(&c2dev->lock, flags);
50 cq = c2dev->qptr_array[cqn];
51 if (!cq) {
52 spin_unlock_irqrestore(&c2dev->lock, flags);
53 return NULL;
54 }
55 atomic_inc(&cq->refcount);
56 spin_unlock_irqrestore(&c2dev->lock, flags);
57 return cq;
58}
59
60static void c2_cq_put(struct c2_cq *cq)
61{
62 if (atomic_dec_and_test(&cq->refcount))
63 wake_up(&cq->wait);
64}
65
66void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
67{
68 struct c2_cq *cq;
69
70 cq = c2_cq_get(c2dev, mq_index);
71 if (!cq) {
 72		pr_debug("discarding events on destroyed CQN=%d\n", mq_index);
73 return;
74 }
75
76 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
77 c2_cq_put(cq);
78}
79
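/*
 * Invalidate any CQEs that still reference a QP being torn down:
 * c2_poll_one() skips entries whose qp_user_context has been
 * cleared, so those completions are silently dropped.
 */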
80void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
81{
82 struct c2_cq *cq;
83 struct c2_mq *q;
84
85 cq = c2_cq_get(c2dev, mq_index);
86 if (!cq)
87 return;
88
89 spin_lock_irq(&cq->lock);
90 q = &cq->mq;
91 if (q && !c2_mq_empty(q)) {
92 u16 priv = q->priv;
93 struct c2wr_ce *msg;
94
95 while (priv != be16_to_cpu(*q->shared)) {
96 msg = (struct c2wr_ce *)
97 (q->msg_pool.host + priv * q->msg_size);
98 if (msg->qp_user_context == (u64) (unsigned long) qp) {
99 msg->qp_user_context = (u64) 0;
100 }
101 priv = (priv + 1) % q->q_size;
102 }
103 }
104 spin_unlock_irq(&cq->lock);
105 c2_cq_put(cq);
106}
107
108static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
109{
110 switch (status) {
111 case C2_OK:
112 return IB_WC_SUCCESS;
113 case CCERR_FLUSHED:
114 return IB_WC_WR_FLUSH_ERR;
115 case CCERR_BASE_AND_BOUNDS_VIOLATION:
116 return IB_WC_LOC_PROT_ERR;
117 case CCERR_ACCESS_VIOLATION:
118 return IB_WC_LOC_ACCESS_ERR;
119 case CCERR_TOTAL_LENGTH_TOO_BIG:
120 return IB_WC_LOC_LEN_ERR;
121 case CCERR_INVALID_WINDOW:
122 return IB_WC_MW_BIND_ERR;
123 default:
124 return IB_WC_GENERAL_ERR;
125 }
126}
127
128
129static inline int c2_poll_one(struct c2_dev *c2dev,
130 struct c2_cq *cq, struct ib_wc *entry)
131{
132 struct c2wr_ce *ce;
133 struct c2_qp *qp;
134 int is_recv = 0;
135
136 ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
137 if (!ce) {
138 return -EAGAIN;
139 }
140
141 /*
 142	 * If the QP returned is NULL then this QP has already
 143	 * been freed and we are unable to process the completion.
 144	 * Try pulling the next message.
145 */
146 while ((qp =
147 (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
148 c2_mq_free(&cq->mq);
149 ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
150 if (!ce)
151 return -EAGAIN;
152 }
153
154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
155 entry->wr_id = ce->hdr.context;
156 entry->qp_num = ce->handle;
157 entry->wc_flags = 0;
158 entry->slid = 0;
159 entry->sl = 0;
160 entry->src_qp = 0;
161 entry->dlid_path_bits = 0;
162 entry->pkey_index = 0;
163
164 switch (c2_wr_get_id(ce)) {
165 case C2_WR_TYPE_SEND:
166 entry->opcode = IB_WC_SEND;
167 break;
168 case C2_WR_TYPE_RDMA_WRITE:
169 entry->opcode = IB_WC_RDMA_WRITE;
170 break;
171 case C2_WR_TYPE_RDMA_READ:
172 entry->opcode = IB_WC_RDMA_READ;
173 break;
174 case C2_WR_TYPE_BIND_MW:
175 entry->opcode = IB_WC_BIND_MW;
176 break;
177 case C2_WR_TYPE_RECV:
178 entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
179 entry->opcode = IB_WC_RECV;
180 is_recv = 1;
181 break;
182 default:
183 break;
184 }
185
186 /* consume the WQEs */
187 if (is_recv)
188 c2_mq_lconsume(&qp->rq_mq, 1);
189 else
190 c2_mq_lconsume(&qp->sq_mq,
191 be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
192
193 /* free the message */
194 c2_mq_free(&cq->mq);
195
196 return 0;
197}
198
199int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
200{
201 struct c2_dev *c2dev = to_c2dev(ibcq->device);
202 struct c2_cq *cq = to_c2cq(ibcq);
203 unsigned long flags;
204 int npolled, err;
205
206 spin_lock_irqsave(&cq->lock, flags);
207
208 for (npolled = 0; npolled < num_entries; ++npolled) {
209
210 err = c2_poll_one(c2dev, cq, entry + npolled);
211 if (err)
212 break;
213 }
214
215 spin_unlock_irqrestore(&cq->lock, flags);
216
217 return npolled;
218}
219
220int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
221{
222 struct c2_mq_shared __iomem *shared;
223 struct c2_cq *cq;
224
225 cq = to_c2cq(ibcq);
226 shared = cq->mq.peer;
227
228 if (notify == IB_CQ_NEXT_COMP)
229 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
230 else if (notify == IB_CQ_SOLICITED)
231 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
232 else
233 return -EINVAL;
234
235 writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
236
237 /*
238 * Now read back shared->armed to make the PCI
239 * write synchronous. This is necessary for
240 * correct cq notification semantics.
241 */
242 readb(&shared->armed);
243
244 return 0;
245}
246
247static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
248{
249
250 dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
251 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
252 free_pages((unsigned long) mq->msg_pool.host,
253 get_order(mq->q_size * mq->msg_size));
254}
255
256static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
257 int msg_size)
258{
259 unsigned long pool_start;
260
261 pool_start = __get_free_pages(GFP_KERNEL,
262 get_order(q_size * msg_size));
263 if (!pool_start)
264 return -ENOMEM;
265
266 c2_mq_rep_init(mq,
267 0, /* index (currently unknown) */
268 q_size,
269 msg_size,
270 (u8 *) pool_start,
271 NULL, /* peer (currently unknown) */
272 C2_MQ_HOST_TARGET);
273
274 mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
275 (void *)pool_start,
276 q_size * msg_size, DMA_FROM_DEVICE);
277 pci_unmap_addr_set(mq, mapping, mq->host_dma);
278
279 return 0;
280}
281
282int c2_init_cq(struct c2_dev *c2dev, int entries,
283 struct c2_ucontext *ctx, struct c2_cq *cq)
284{
285 struct c2wr_cq_create_req wr;
286 struct c2wr_cq_create_rep *reply;
287 unsigned long peer_pa;
288 struct c2_vq_req *vq_req;
289 int err;
290
291 might_sleep();
292
293 cq->ibcq.cqe = entries - 1;
294 cq->is_kernel = !ctx;
295
296 /* Allocate a shared pointer */
297 cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
298 &cq->mq.shared_dma, GFP_KERNEL);
299 if (!cq->mq.shared)
300 return -ENOMEM;
301
302 /* Allocate pages for the message pool */
303 err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
304 if (err)
305 goto bail0;
306
307 vq_req = vq_req_alloc(c2dev);
308 if (!vq_req) {
309 err = -ENOMEM;
310 goto bail1;
311 }
312
313 memset(&wr, 0, sizeof(wr));
314 c2_wr_set_id(&wr, CCWR_CQ_CREATE);
315 wr.hdr.context = (unsigned long) vq_req;
316 wr.rnic_handle = c2dev->adapter_handle;
317 wr.msg_size = cpu_to_be32(cq->mq.msg_size);
318 wr.depth = cpu_to_be32(cq->mq.q_size);
319 wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
320 wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
321 wr.user_context = (u64) (unsigned long) (cq);
322
323 vq_req_get(c2dev, vq_req);
324
325 err = vq_send_wr(c2dev, (union c2wr *) & wr);
326 if (err) {
327 vq_req_put(c2dev, vq_req);
328 goto bail2;
329 }
330
331 err = vq_wait_for_reply(c2dev, vq_req);
332 if (err)
333 goto bail2;
334
335 reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
336 if (!reply) {
337 err = -ENOMEM;
338 goto bail2;
339 }
340
341 if ((err = c2_errno(reply)) != 0)
342 goto bail3;
343
344 cq->adapter_handle = reply->cq_handle;
345 cq->mq.index = be32_to_cpu(reply->mq_index);
346
347 peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
348 cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
349 if (!cq->mq.peer) {
350 err = -ENOMEM;
351 goto bail3;
352 }
353
354 vq_repbuf_free(c2dev, reply);
355 vq_req_free(c2dev, vq_req);
356
357 spin_lock_init(&cq->lock);
358 atomic_set(&cq->refcount, 1);
359 init_waitqueue_head(&cq->wait);
360
361 /*
362 * Use the MQ index allocated by the adapter to
363 * store the CQ in the qptr_array
364 */
365 cq->cqn = cq->mq.index;
366 c2dev->qptr_array[cq->cqn] = cq;
367
368 return 0;
369
370 bail3:
371 vq_repbuf_free(c2dev, reply);
372 bail2:
373 vq_req_free(c2dev, vq_req);
374 bail1:
375 c2_free_cq_buf(c2dev, &cq->mq);
376 bail0:
377 c2_free_mqsp(cq->mq.shared);
378
379 return err;
380}
381
382void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
383{
384 int err;
385 struct c2_vq_req *vq_req;
386 struct c2wr_cq_destroy_req wr;
387 struct c2wr_cq_destroy_rep *reply;
388
389 might_sleep();
390
391 /* Clear CQ from the qptr array */
392 spin_lock_irq(&c2dev->lock);
393 c2dev->qptr_array[cq->mq.index] = NULL;
394 atomic_dec(&cq->refcount);
395 spin_unlock_irq(&c2dev->lock);
396
397 wait_event(cq->wait, !atomic_read(&cq->refcount));
398
399 vq_req = vq_req_alloc(c2dev);
400 if (!vq_req) {
401 goto bail0;
402 }
403
404 memset(&wr, 0, sizeof(wr));
405 c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
406 wr.hdr.context = (unsigned long) vq_req;
407 wr.rnic_handle = c2dev->adapter_handle;
408 wr.cq_handle = cq->adapter_handle;
409
410 vq_req_get(c2dev, vq_req);
411
412 err = vq_send_wr(c2dev, (union c2wr *) & wr);
413 if (err) {
414 vq_req_put(c2dev, vq_req);
415 goto bail1;
416 }
417
418 err = vq_wait_for_reply(c2dev, vq_req);
419 if (err)
420 goto bail1;
421
422 reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
423
424 vq_repbuf_free(c2dev, reply);
425 bail1:
426 vq_req_free(c2dev, vq_req);
427 bail0:
428 if (cq->is_kernel) {
429 c2_free_cq_buf(c2dev, &cq->mq);
430 }
431
432 return;
433}
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
new file mode 100644
index 000000000000..0d0bc33ca30a
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include <rdma/iw_cm.h>
35#include "c2_vq.h"
36
37static void handle_mq(struct c2_dev *c2dev, u32 index);
38static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
39
40/*
41 * Handle RNIC interrupts
42 */
43void c2_rnic_interrupt(struct c2_dev *c2dev)
44{
45 unsigned int mq_index;
46
47 while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
48 mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
49 if (mq_index & 0x80000000) {
50 break;
51 }
52
53 c2dev->hints_read++;
54 handle_mq(c2dev, mq_index);
55 }
56
57}
58
59/*
60 * Top level MQ handler
61 */
62static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
63{
64 if (c2dev->qptr_array[mq_index] == NULL) {
 65		pr_debug("handle_mq: stray activity for mq_index=%d\n",
66 mq_index);
67 return;
68 }
69
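	/*
	 * MQ index 0 is the request VQ hint, index 1 the reply VQ,
	 * index 2 the asynchronous event queue; any other index
	 * refers to a completion queue.
	 */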
70 switch (mq_index) {
71 case (0):
72 /*
73 * An index of 0 in the activity queue
74 * indicates the req vq now has messages
75 * available...
76 *
77 * Wake up any waiters waiting on req VQ
78 * message availability.
79 */
80 wake_up(&c2dev->req_vq_wo);
81 break;
82 case (1):
83 handle_vq(c2dev, mq_index);
84 break;
85 case (2):
86 /* We have to purge the VQ in case there are pending
87 * accept reply requests that would result in the
88 * generation of an ESTABLISHED event. If we don't
89 * generate these first, a CLOSE event could end up
90 * being delivered before the ESTABLISHED event.
91 */
92 handle_vq(c2dev, 1);
93
94 c2_ae_event(c2dev, mq_index);
95 break;
96 default:
 97		/* There is no event synchronization between CQ events
 98		 * and AE or CM events. In fact, CQEs could be
 99		 * delivered for all of the I/O up to and including the
 100		 * FLUSH for a peer disconnect prior to the ESTABLISHED
 101		 * event being delivered to the app. The reason for this
 102		 * is that CM events are delivered on a thread, while AE
 103		 * and CQ events are delivered in interrupt context.
104 */
105 c2_cq_event(c2dev, mq_index);
106 break;
107 }
108
109 return;
110}
111
112/*
113 * Handles verbs WR replies.
114 */
115static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
116{
117 void *adapter_msg, *reply_msg;
118 struct c2wr_hdr *host_msg;
119 struct c2wr_hdr tmp;
120 struct c2_mq *reply_vq;
121 struct c2_vq_req *req;
122 struct iw_cm_event cm_event;
123 int err;
124
125 reply_vq = (struct c2_mq *) c2dev->qptr_array[mq_index];
126
127 /*
128 * get next msg from mq_index into adapter_msg.
129 * don't free it yet.
130 */
131 adapter_msg = c2_mq_consume(reply_vq);
132 if (adapter_msg == NULL) {
133 return;
134 }
135
136 host_msg = vq_repbuf_alloc(c2dev);
137
138 /*
139 * If we can't get a host buffer, then we'll still
140 * wakeup the waiter, we just won't give him the msg.
141 * It is assumed the waiter will deal with this...
142 */
143 if (!host_msg) {
144 pr_debug("handle_vq: no repbufs!\n");
145
146 /*
147 * just copy the WR header into a local variable.
148 * this allows us to still demux on the context
149 */
150 host_msg = &tmp;
151 memcpy(host_msg, adapter_msg, sizeof(tmp));
152 reply_msg = NULL;
153 } else {
154 memcpy(host_msg, adapter_msg, reply_vq->msg_size);
155 reply_msg = host_msg;
156 }
157
158 /*
159 * consume the msg from the MQ
160 */
161 c2_mq_free(reply_vq);
162
163 /*
164 * wakeup the waiter.
165 */
166 req = (struct c2_vq_req *) (unsigned long) host_msg->context;
167 if (req == NULL) {
168 /*
169 * We should never get here, as the adapter should
170 * never send us a reply that we're not expecting.
171 */
172 vq_repbuf_free(c2dev, host_msg);
173 pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
174 return;
175 }
176
177 err = c2_errno(reply_msg);
178 if (!err) switch (req->event) {
 179	case IW_CM_EVENT_ESTABLISHED:
 180		/*
 181		 * Move the QP to RTS if this is
 182		 * the established event
 183		 */
 184		c2_set_qp_state(req->qp,
 185				C2_QP_STATE_RTS);
 186		/* fall through */
 187	case IW_CM_EVENT_CLOSE:
188 cm_event.event = req->event;
189 cm_event.status = 0;
190 cm_event.local_addr = req->cm_id->local_addr;
191 cm_event.remote_addr = req->cm_id->remote_addr;
192 cm_event.private_data = NULL;
193 cm_event.private_data_len = 0;
194 req->cm_id->event_handler(req->cm_id, &cm_event);
195 break;
196 default:
197 break;
198 }
199
200 req->reply_msg = (u64) (unsigned long) (reply_msg);
201 atomic_set(&req->reply_ready, 1);
202 wake_up(&req->wait_object);
203
204 /*
205 * If the request was cancelled, then this put will
206 * free the vq_req memory...and reply_msg!!!
207 */
208 vq_req_put(c2dev, req);
209}
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
new file mode 100644
index 000000000000..1e4f46493fcb
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -0,0 +1,375 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include "c2_vq.h"
35
36#define PBL_VIRT 1
37#define PBL_PHYS 2
38
39/*
 40 * Send all the PBL messages to convey the remainder of the PBL.
41 * Wait for the adapter's reply on the last one.
42 * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
43 *
44 * NOTE: vq_req is _not_ freed by this function. The VQ Host
45 * Reply buffer _is_ freed by this function.
46 */
47static int
48send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
49 unsigned long va, u32 pbl_depth,
50 struct c2_vq_req *vq_req, int pbl_type)
51{
52 u32 pbe_count; /* amt that fits in a PBL msg */
53 u32 count; /* amt in this PBL MSG. */
54 struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */
55 struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */
56 int err, pbl_virt, pbl_index, i;
57
58 switch (pbl_type) {
59 case PBL_VIRT:
60 pbl_virt = 1;
61 break;
62 case PBL_PHYS:
63 pbl_virt = 0;
64 break;
65 default:
66 return -EINVAL;
67 break;
68 }
69
70 pbe_count = (c2dev->req_vq.msg_size -
71 sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
72 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
73 if (!wr) {
74 return -ENOMEM;
75 }
76 c2_wr_set_id(wr, CCWR_NSMR_PBL);
77
78 /*
79 * Only the last PBL message will generate a reply from the verbs,
80 * so we set the context to 0 indicating there is no kernel verbs
81 * handler blocked awaiting this reply.
82 */
83 wr->hdr.context = 0;
84 wr->rnic_handle = c2dev->adapter_handle;
85 wr->stag_index = stag_index; /* already swapped */
86 wr->flags = 0;
87 pbl_index = 0;
88 while (pbl_depth) {
89 count = min(pbe_count, pbl_depth);
90 wr->addrs_length = cpu_to_be32(count);
91
92 /*
93 * If this is the last message, then reference the
 94		 * vq request struct because we are going to wait for a reply.
 95		 * Also mark this PBL msg as the last one.
96 */
97 if (count == pbl_depth) {
98 /*
99 * reference the request struct. dereferenced in the
100 * int handler.
101 */
102 vq_req_get(c2dev, vq_req);
103 wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
104
105 /*
106 * This is the last PBL message.
107 * Set the context to our VQ Request Object so we can
108 * wait for the reply.
109 */
110 wr->hdr.context = (unsigned long) vq_req;
111 }
112
113 /*
114 * If pbl_virt is set then va is a virtual address
115 * that describes a virtually contiguous memory
116 * allocation. The wr needs the start of each virtual page
117 * to be converted to the corresponding physical address
118 * of the page. If pbl_virt is not set then va is an array
119 * of physical addresses and there is no conversion to do.
120 * Just fill in the wr with what is in the array.
121 */
122 for (i = 0; i < count; i++) {
123 if (pbl_virt) {
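				/*
				 * Virtual-to-physical translation is not done
				 * here; the only caller in this file passes
				 * PBL_PHYS.
				 */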
124 va += PAGE_SIZE;
125 } else {
126 wr->paddrs[i] =
127 cpu_to_be64(((u64 *)va)[pbl_index + i]);
128 }
129 }
130
131 /*
132 * Send WR to adapter
133 */
134 err = vq_send_wr(c2dev, (union c2wr *) wr);
135 if (err) {
 136			if (count == pbl_depth) {
137 vq_req_put(c2dev, vq_req);
138 }
139 goto bail0;
140 }
141 pbl_depth -= count;
142 pbl_index += count;
143 }
144
145 /*
146 * Now wait for the reply...
147 */
148 err = vq_wait_for_reply(c2dev, vq_req);
149 if (err) {
150 goto bail0;
151 }
152
153 /*
154 * Process reply
155 */
156 reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
157 if (!reply) {
158 err = -ENOMEM;
159 goto bail0;
160 }
161
162 err = c2_errno(reply);
163
164 vq_repbuf_free(c2dev, reply);
165 bail0:
166 kfree(wr);
167 return err;
168}
169
170#define C2_PBL_MAX_DEPTH 131072
171int
172c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
173 int page_size, int pbl_depth, u32 length,
174 u32 offset, u64 *va, enum c2_acf acf,
175 struct c2_mr *mr)
176{
177 struct c2_vq_req *vq_req;
178 struct c2wr_nsmr_register_req *wr;
179 struct c2wr_nsmr_register_rep *reply;
180 u16 flags;
181 int i, pbe_count, count;
182 int err;
183
184 if (!va || !length || !addr_list || !pbl_depth)
 185		return -EINVAL;
186
187 /*
188 * Verify PBL depth is within rnic max
189 */
190 if (pbl_depth > C2_PBL_MAX_DEPTH) {
 191		return -EINVAL;
192 }
193
194 /*
195 * allocate verbs request object
196 */
197 vq_req = vq_req_alloc(c2dev);
198 if (!vq_req)
199 return -ENOMEM;
200
201 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
202 if (!wr) {
203 err = -ENOMEM;
204 goto bail0;
205 }
206
207 /*
208 * build the WR
209 */
210 c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
211 wr->hdr.context = (unsigned long) vq_req;
212 wr->rnic_handle = c2dev->adapter_handle;
213
214 flags = (acf | MEM_VA_BASED | MEM_REMOTE);
215
216 /*
217 * compute how many pbes can fit in the message
218 */
219 pbe_count = (c2dev->req_vq.msg_size -
220 sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
221
222 if (pbl_depth <= pbe_count) {
223 flags |= MEM_PBL_COMPLETE;
224 }
225 wr->flags = cpu_to_be16(flags);
226 wr->stag_key = 0; //stag_key;
227 wr->va = cpu_to_be64(*va);
228 wr->pd_id = mr->pd->pd_id;
229 wr->pbe_size = cpu_to_be32(page_size);
230 wr->length = cpu_to_be32(length);
231 wr->pbl_depth = cpu_to_be32(pbl_depth);
232 wr->fbo = cpu_to_be32(offset);
233 count = min(pbl_depth, pbe_count);
234 wr->addrs_length = cpu_to_be32(count);
235
236 /*
237 * fill out the PBL for this message
238 */
239 for (i = 0; i < count; i++) {
240 wr->paddrs[i] = cpu_to_be64(addr_list[i]);
241 }
242
243 /*
 244	 * Reference the request struct
245 */
246 vq_req_get(c2dev, vq_req);
247
248 /*
249 * send the WR to the adapter
250 */
251 err = vq_send_wr(c2dev, (union c2wr *) wr);
252 if (err) {
253 vq_req_put(c2dev, vq_req);
254 goto bail1;
255 }
256
257 /*
258 * wait for reply from adapter
259 */
260 err = vq_wait_for_reply(c2dev, vq_req);
261 if (err) {
262 goto bail1;
263 }
264
265 /*
266 * process reply
267 */
268 reply =
269 (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
270 if (!reply) {
271 err = -ENOMEM;
272 goto bail1;
273 }
274 if ((err = c2_errno(reply))) {
275 goto bail2;
276 }
277 //*p_pb_entries = be32_to_cpu(reply->pbl_depth);
278 mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
279 vq_repbuf_free(c2dev, reply);
280
281 /*
282 * if there are still more PBEs we need to send them to
283 * the adapter and wait for a reply on the final one.
284 * reuse vq_req for this purpose.
285 */
286 pbl_depth -= count;
287 if (pbl_depth) {
288
289 vq_req->reply_msg = (unsigned long) NULL;
290 atomic_set(&vq_req->reply_ready, 0);
291 err = send_pbl_messages(c2dev,
292 cpu_to_be32(mr->ibmr.lkey),
293 (unsigned long) &addr_list[i],
294 pbl_depth, vq_req, PBL_PHYS);
295 if (err) {
296 goto bail1;
297 }
298 }
299
300 vq_req_free(c2dev, vq_req);
301 kfree(wr);
302
303 return err;
304
305 bail2:
306 vq_repbuf_free(c2dev, reply);
307 bail1:
308 kfree(wr);
309 bail0:
310 vq_req_free(c2dev, vq_req);
311 return err;
312}
313
314int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
315{
316 struct c2_vq_req *vq_req; /* verbs request object */
317 struct c2wr_stag_dealloc_req wr; /* work request */
318 struct c2wr_stag_dealloc_rep *reply; /* WR reply */
319 int err;
320
321
322 /*
323 * allocate verbs request object
324 */
325 vq_req = vq_req_alloc(c2dev);
326 if (!vq_req) {
327 return -ENOMEM;
328 }
329
330 /*
331 * Build the WR
332 */
333 c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
334 wr.hdr.context = (u64) (unsigned long) vq_req;
335 wr.rnic_handle = c2dev->adapter_handle;
336 wr.stag_index = cpu_to_be32(stag_index);
337
338 /*
339 * reference the request struct. dereferenced in the int handler.
340 */
341 vq_req_get(c2dev, vq_req);
342
343 /*
344 * Send WR to adapter
345 */
346 err = vq_send_wr(c2dev, (union c2wr *) & wr);
347 if (err) {
348 vq_req_put(c2dev, vq_req);
349 goto bail0;
350 }
351
352 /*
353 * Wait for reply from adapter
354 */
355 err = vq_wait_for_reply(c2dev, vq_req);
356 if (err) {
357 goto bail0;
358 }
359
360 /*
361 * Process reply
362 */
363 reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
364 if (!reply) {
365 err = -ENOMEM;
366 goto bail0;
367 }
368
369 err = c2_errno(reply);
370
371 vq_repbuf_free(c2dev, reply);
372 bail0:
373 vq_req_free(c2dev, vq_req);
374 return err;
375}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c
new file mode 100644
index 000000000000..b88a75592102
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mq.c
@@ -0,0 +1,174 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include "c2.h"
34#include "c2_mq.h"
35
36void *c2_mq_alloc(struct c2_mq *q)
37{
38 BUG_ON(q->magic != C2_MQ_MAGIC);
39 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
40
41 if (c2_mq_full(q)) {
42 return NULL;
43 } else {
44#ifdef DEBUG
45 struct c2wr_hdr *m =
46 (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
47#ifdef CCMSGMAGIC
48 BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
49 m->magic = cpu_to_be32(CCWR_MAGIC);
50#endif
51 return m;
52#else
53 return q->msg_pool.host + q->priv * q->msg_size;
54#endif
55 }
56}
57
58void c2_mq_produce(struct c2_mq *q)
59{
60 BUG_ON(q->magic != C2_MQ_MAGIC);
61 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
62
63 if (!c2_mq_full(q)) {
64 q->priv = (q->priv + 1) % q->q_size;
65 q->hint_count++;
66 /* Update peer's offset. */
67 __raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
68 }
69}
70
71void *c2_mq_consume(struct c2_mq *q)
72{
73 BUG_ON(q->magic != C2_MQ_MAGIC);
74 BUG_ON(q->type != C2_MQ_HOST_TARGET);
75
76 if (c2_mq_empty(q)) {
77 return NULL;
78 } else {
79#ifdef DEBUG
80 struct c2wr_hdr *m = (struct c2wr_hdr *)
81 (q->msg_pool.host + q->priv * q->msg_size);
82#ifdef CCMSGMAGIC
83 BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
84#endif
85 return m;
86#else
87 return q->msg_pool.host + q->priv * q->msg_size;
88#endif
89 }
90}
91
92void c2_mq_free(struct c2_mq *q)
93{
94 BUG_ON(q->magic != C2_MQ_MAGIC);
95 BUG_ON(q->type != C2_MQ_HOST_TARGET);
96
97 if (!c2_mq_empty(q)) {
98
99#ifdef CCMSGMAGIC
100 {
101 struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
102 (q->msg_pool.adapter + q->priv * q->msg_size);
103 __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
104 }
105#endif
106 q->priv = (q->priv + 1) % q->q_size;
107 /* Update peer's offset. */
108 __raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
109 }
110}
111
112
113void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
114{
115 BUG_ON(q->magic != C2_MQ_MAGIC);
116 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
117
118 while (wqe_count--) {
119 BUG_ON(c2_mq_empty(q));
120 *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
121 }
122}
123
124#if 0
125u32 c2_mq_count(struct c2_mq *q)
126{
127 s32 count;
128
129 if (q->type == C2_MQ_HOST_TARGET)
130 count = be16_to_cpu(*q->shared) - q->priv;
131 else
132 count = q->priv - be16_to_cpu(*q->shared);
133
134 if (count < 0)
135 count += q->q_size;
136
137 return (u32) count;
138}
139#endif /* 0 */
140
141void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
142 u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
143{
144 BUG_ON(!q->shared);
145
146 /* This code assumes the byte swapping has already been done! */
147 q->index = index;
148 q->q_size = q_size;
149 q->msg_size = msg_size;
150 q->msg_pool.adapter = pool_start;
151 q->peer = (struct c2_mq_shared __iomem *) peer;
152 q->magic = C2_MQ_MAGIC;
153 q->type = type;
154 q->priv = 0;
155 q->hint_count = 0;
156 return;
157}
158void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
159 u8 *pool_start, u16 __iomem *peer, u32 type)
160{
161 BUG_ON(!q->shared);
162
163 /* This code assumes the byte swapping has already been done! */
164 q->index = index;
165 q->q_size = q_size;
166 q->msg_size = msg_size;
167 q->msg_pool.host = pool_start;
168 q->peer = (struct c2_mq_shared __iomem *) peer;
169 q->magic = C2_MQ_MAGIC;
170 q->type = type;
171 q->priv = 0;
172 q->hint_count = 0;
173 return;
174}
diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h
new file mode 100644
index 000000000000..9185bbb21658
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mq.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef _C2_MQ_H_
35#define _C2_MQ_H_
36#include <linux/kernel.h>
37#include <linux/dma-mapping.h>
38#include "c2_wr.h"
39
40enum c2_shared_regs {
41
42 C2_SHARED_ARMED = 0x10,
43 C2_SHARED_NOTIFY = 0x18,
44 C2_SHARED_SHARED = 0x40,
45};
46
47struct c2_mq_shared {
48 u16 unused1;
49 u8 armed;
50 u8 notification_type;
51 u32 unused2;
52 u16 shared;
53 /* Pad to 64 bytes. */
54 u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
55};
56
57enum c2_mq_type {
58 C2_MQ_HOST_TARGET = 1,
59 C2_MQ_ADAPTER_TARGET = 2,
60};
61
62/*
 63 * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
64 * c2_user_mq_t (which is the same format) is for user-mode MQs...
65 */
66#define C2_MQ_MAGIC 0x4d512020 /* 'MQ ' */
67struct c2_mq {
68 u32 magic;
69 union {
70 u8 *host;
71 u8 __iomem *adapter;
72 } msg_pool;
73 dma_addr_t host_dma;
74 DECLARE_PCI_UNMAP_ADDR(mapping);
75 u16 hint_count;
76 u16 priv;
77 struct c2_mq_shared __iomem *peer;
78 u16 *shared;
79 dma_addr_t shared_dma;
80 u32 q_size;
81 u32 msg_size;
82 u32 index;
83 enum c2_mq_type type;
84};
85
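/*
 * priv is this side's ring index; *shared mirrors the peer's index.
 * One slot is always left unused so that a full ring can be
 * distinguished from an empty one.
 */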
86static __inline__ int c2_mq_empty(struct c2_mq *q)
87{
88 return q->priv == be16_to_cpu(*q->shared);
89}
90
91static __inline__ int c2_mq_full(struct c2_mq *q)
92{
93 return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
94}
95
96extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
97extern void *c2_mq_alloc(struct c2_mq *q);
98extern void c2_mq_produce(struct c2_mq *q);
99extern void *c2_mq_consume(struct c2_mq *q);
100extern void c2_mq_free(struct c2_mq *q);
101extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
102 u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
103extern void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
104 u8 *pool_start, u16 __iomem *peer, u32 type);
105
106#endif /* _C2_MQ_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c
new file mode 100644
index 000000000000..00c709926c8d
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_pd.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/init.h>
37#include <linux/errno.h>
38
39#include "c2.h"
40#include "c2_provider.h"
41
42int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
43{
44 u32 obj;
45 int ret = 0;
46
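	/*
	 * Round-robin scan of the PD bitmap: start searching after the
	 * last allocation and wrap to the beginning if nothing is free
	 * above it.
	 */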
47 spin_lock(&c2dev->pd_table.lock);
48 obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
49 c2dev->pd_table.last);
50 if (obj >= c2dev->pd_table.max)
51 obj = find_first_zero_bit(c2dev->pd_table.table,
52 c2dev->pd_table.max);
53 if (obj < c2dev->pd_table.max) {
54 pd->pd_id = obj;
55 __set_bit(obj, c2dev->pd_table.table);
56 c2dev->pd_table.last = obj+1;
57 if (c2dev->pd_table.last >= c2dev->pd_table.max)
58 c2dev->pd_table.last = 0;
59 } else
60 ret = -ENOMEM;
61 spin_unlock(&c2dev->pd_table.lock);
62 return ret;
63}
64
65void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
66{
67 spin_lock(&c2dev->pd_table.lock);
68 __clear_bit(pd->pd_id, c2dev->pd_table.table);
69 spin_unlock(&c2dev->pd_table.lock);
70}
71
72int __devinit c2_init_pd_table(struct c2_dev *c2dev)
73{
74
75 c2dev->pd_table.last = 0;
76 c2dev->pd_table.max = c2dev->props.max_pd;
77 spin_lock_init(&c2dev->pd_table.lock);
78 c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
79 sizeof(long), GFP_KERNEL);
80 if (!c2dev->pd_table.table)
81 return -ENOMEM;
82 bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
83 return 0;
84}
85
86void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev)
87{
88 kfree(c2dev->pd_table.table);
89}
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
new file mode 100644
index 000000000000..dd6af551108b
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -0,0 +1,870 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/pci.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/inetdevice.h>
41#include <linux/delay.h>
42#include <linux/ethtool.h>
43#include <linux/mii.h>
44#include <linux/if_vlan.h>
45#include <linux/crc32.h>
46#include <linux/in.h>
47#include <linux/ip.h>
48#include <linux/tcp.h>
49#include <linux/init.h>
50#include <linux/dma-mapping.h>
51#include <linux/if_arp.h>
52#include <linux/vmalloc.h>
53
54#include <asm/io.h>
55#include <asm/irq.h>
56#include <asm/byteorder.h>
57
58#include <rdma/ib_smi.h>
59#include <rdma/ib_user_verbs.h>
60#include "c2.h"
61#include "c2_provider.h"
62#include "c2_user.h"
63
64static int c2_query_device(struct ib_device *ibdev,
65 struct ib_device_attr *props)
66{
67 struct c2_dev *c2dev = to_c2dev(ibdev);
68
69 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
70
71 *props = c2dev->props;
72 return 0;
73}
74
75static int c2_query_port(struct ib_device *ibdev,
76 u8 port, struct ib_port_attr *props)
77{
78 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
79
80 props->max_mtu = IB_MTU_4096;
81 props->lid = 0;
82 props->lmc = 0;
83 props->sm_lid = 0;
84 props->sm_sl = 0;
85 props->state = IB_PORT_ACTIVE;
86 props->phys_state = 0;
87 props->port_cap_flags =
88 IB_PORT_CM_SUP |
89 IB_PORT_REINIT_SUP |
90 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
91 props->gid_tbl_len = 1;
92 props->pkey_tbl_len = 1;
93 props->qkey_viol_cntr = 0;
94 props->active_width = 1;
95 props->active_speed = 1;
96
97 return 0;
98}
99
100static int c2_modify_port(struct ib_device *ibdev,
101 u8 port, int port_modify_mask,
102 struct ib_port_modify *props)
103{
104 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
105 return 0;
106}
107
108static int c2_query_pkey(struct ib_device *ibdev,
109 u8 port, u16 index, u16 * pkey)
110{
111 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
112 *pkey = 0;
113 return 0;
114}
115
116static int c2_query_gid(struct ib_device *ibdev, u8 port,
117 int index, union ib_gid *gid)
118{
119 struct c2_dev *c2dev = to_c2dev(ibdev);
120
121 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
122 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
123 memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
124
125 return 0;
126}
127
128/* Allocate the user context data structure. This keeps track
129 * of all objects associated with a particular user-mode client.
130 */
131static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
132 struct ib_udata *udata)
133{
134 struct c2_ucontext *context;
135
136 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
137 context = kmalloc(sizeof(*context), GFP_KERNEL);
138 if (!context)
139 return ERR_PTR(-ENOMEM);
140
141 return &context->ibucontext;
142}
143
144static int c2_dealloc_ucontext(struct ib_ucontext *context)
145{
146 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
147 kfree(context);
148 return 0;
149}
150
151static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
152{
153 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
154 return -ENOSYS;
155}
156
157static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
158 struct ib_ucontext *context,
159 struct ib_udata *udata)
160{
161 struct c2_pd *pd;
162 int err;
163
164 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
165
166 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
167 if (!pd)
168 return ERR_PTR(-ENOMEM);
169
170 err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
171 if (err) {
172 kfree(pd);
173 return ERR_PTR(err);
174 }
175
176 if (context) {
177 if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
178 c2_pd_free(to_c2dev(ibdev), pd);
179 kfree(pd);
180 return ERR_PTR(-EFAULT);
181 }
182 }
183
184 return &pd->ibpd;
185}
186
187static int c2_dealloc_pd(struct ib_pd *pd)
188{
189 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
190 c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
191 kfree(pd);
192
193 return 0;
194}
195
196static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
197{
198 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
199 return ERR_PTR(-ENOSYS);
200}
201
202static int c2_ah_destroy(struct ib_ah *ah)
203{
204 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
205 return -ENOSYS;
206}
207
208static void c2_add_ref(struct ib_qp *ibqp)
209{
210 struct c2_qp *qp;
211 BUG_ON(!ibqp);
212 qp = to_c2qp(ibqp);
213 atomic_inc(&qp->refcount);
214}
215
216static void c2_rem_ref(struct ib_qp *ibqp)
217{
218 struct c2_qp *qp;
219 BUG_ON(!ibqp);
220 qp = to_c2qp(ibqp);
221 if (atomic_dec_and_test(&qp->refcount))
222 wake_up(&qp->wait);
223}
224
225struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
226{
227 struct c2_dev* c2dev = to_c2dev(device);
228 struct c2_qp *qp;
229
230 qp = c2_find_qpn(c2dev, qpn);
231 pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
232 __FUNCTION__, qp, qpn, device,
233 (qp?atomic_read(&qp->refcount):0));
234
235 return (qp?&qp->ibqp:NULL);
236}
237
238static struct ib_qp *c2_create_qp(struct ib_pd *pd,
239 struct ib_qp_init_attr *init_attr,
240 struct ib_udata *udata)
241{
242 struct c2_qp *qp;
243 int err;
244
245 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
246
247 switch (init_attr->qp_type) {
248 case IB_QPT_RC:
249 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
250 if (!qp) {
251 pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
252 return ERR_PTR(-ENOMEM);
253 }
254 spin_lock_init(&qp->lock);
255 if (pd->uobject) {
256 /* userspace specific */
257 }
258
259 err = c2_alloc_qp(to_c2dev(pd->device),
260 to_c2pd(pd), init_attr, qp);
261
262 if (err && pd->uobject) {
263 /* userspace specific */
264 }
265
266 break;
267 default:
268 pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
269 init_attr->qp_type);
270 return ERR_PTR(-EINVAL);
271 break;
272 }
273
274 if (err) {
275 kfree(qp);
276 return ERR_PTR(err);
277 }
278
279 return &qp->ibqp;
280}
281
282static int c2_destroy_qp(struct ib_qp *ib_qp)
283{
284 struct c2_qp *qp = to_c2qp(ib_qp);
285
286 pr_debug("%s:%u qp=%p,qp->state=%d\n",
287 __FUNCTION__, __LINE__,ib_qp,qp->state);
288 c2_free_qp(to_c2dev(ib_qp->device), qp);
289 kfree(qp);
290 return 0;
291}
292
293static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
294 struct ib_ucontext *context,
295 struct ib_udata *udata)
296{
297 struct c2_cq *cq;
298 int err;
299
300 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
301 if (!cq) {
302 pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__);
303 return ERR_PTR(-ENOMEM);
304 }
305
306 err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
307 if (err) {
308 pr_debug("%s: error initializing CQ\n", __FUNCTION__);
309 kfree(cq);
310 return ERR_PTR(err);
311 }
312
313 return &cq->ibcq;
314}
315
316static int c2_destroy_cq(struct ib_cq *ib_cq)
317{
318 struct c2_cq *cq = to_c2cq(ib_cq);
319
320 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
321
322 c2_free_cq(to_c2dev(ib_cq->device), cq);
323 kfree(cq);
324
325 return 0;
326}
327
328static inline u32 c2_convert_access(int acc)
329{
330 return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
331 (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
332 (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
333 C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
334}
335
336static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
337 struct ib_phys_buf *buffer_list,
338 int num_phys_buf, int acc, u64 * iova_start)
339{
340 struct c2_mr *mr;
341 u64 *page_list;
342 u32 total_len;
343 int err, i, j, k, page_shift, pbl_depth;
344
345 pbl_depth = 0;
346 total_len = 0;
347
348 page_shift = PAGE_SHIFT;
349 /*
 350	 * If there is only 1 buffer we assume this could
 351	 * be a map of all physical memory, so use a 32KB page size.
352 */
353 if (num_phys_buf == 1)
354 page_shift += 3;
355
356 for (i = 0; i < num_phys_buf; i++) {
357
358 if (buffer_list[i].addr & ~PAGE_MASK) {
359 pr_debug("Unaligned Memory Buffer: 0x%x\n",
360 (unsigned int) buffer_list[i].addr);
361 return ERR_PTR(-EINVAL);
362 }
363
364 if (!buffer_list[i].size) {
365 pr_debug("Invalid Buffer Size\n");
366 return ERR_PTR(-EINVAL);
367 }
368
369 total_len += buffer_list[i].size;
370 pbl_depth += ALIGN(buffer_list[i].size,
371 (1 << page_shift)) >> page_shift;
372 }
373
374 page_list = vmalloc(sizeof(u64) * pbl_depth);
375 if (!page_list) {
376 pr_debug("couldn't vmalloc page_list of size %zd\n",
377 (sizeof(u64) * pbl_depth));
378 return ERR_PTR(-ENOMEM);
379 }
380
381 for (i = 0, j = 0; i < num_phys_buf; i++) {
382
383 int naddrs;
384
385 naddrs = ALIGN(buffer_list[i].size,
386 (1 << page_shift)) >> page_shift;
387 for (k = 0; k < naddrs; k++)
388 page_list[j++] = (buffer_list[i].addr +
389 (k << page_shift));
390 }
391
392 mr = kmalloc(sizeof(*mr), GFP_KERNEL);
393 if (!mr)
394 return ERR_PTR(-ENOMEM);
395
396 mr->pd = to_c2pd(ib_pd);
397 pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
398 "*iova_start %llx, first pa %llx, last pa %llx\n",
399 __FUNCTION__, page_shift, pbl_depth, total_len,
400 *iova_start, page_list[0], page_list[pbl_depth-1]);
401 err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
402 (1 << page_shift), pbl_depth,
403 total_len, 0, iova_start,
404 c2_convert_access(acc), mr);
405 vfree(page_list);
406 if (err) {
407 kfree(mr);
408 return ERR_PTR(err);
409 }
410
411 return &mr->ibmr;
412}
413
414static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
415{
416 struct ib_phys_buf bl;
417 u64 kva = 0;
418
419 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
420
421 /* AMSO1100 limit */
422 bl.size = 0xffffffff;
423 bl.addr = 0;
424 return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
425}
426
427static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
428 int acc, struct ib_udata *udata)
429{
430 u64 *pages;
431 u64 kva = 0;
432 int shift, n, len;
433 int i, j, k;
434 int err = 0;
435 struct ib_umem_chunk *chunk;
436 struct c2_pd *c2pd = to_c2pd(pd);
437 struct c2_mr *c2mr;
438
439 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
440 shift = ffs(region->page_size) - 1;
441
442 c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
443 if (!c2mr)
444 return ERR_PTR(-ENOMEM);
445 c2mr->pd = c2pd;
446
447 n = 0;
448 list_for_each_entry(chunk, &region->chunk_list, list)
449 n += chunk->nents;
450
451 pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
452 if (!pages) {
453 err = -ENOMEM;
454 goto err;
455 }
456
457 i = 0;
458 list_for_each_entry(chunk, &region->chunk_list, list) {
459 for (j = 0; j < chunk->nmap; ++j) {
460 len = sg_dma_len(&chunk->page_list[j]) >> shift;
461 for (k = 0; k < len; ++k) {
462 pages[i++] =
463 sg_dma_address(&chunk->page_list[j]) +
464 (region->page_size * k);
465 }
466 }
467 }
468
469 kva = (u64)region->virt_base;
470 err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
471 pages,
472 region->page_size,
473 i,
474 region->length,
475 region->offset,
476 &kva,
477 c2_convert_access(acc),
478 c2mr);
479 kfree(pages);
480 if (err) {
481 kfree(c2mr);
482 return ERR_PTR(err);
483 }
484 return &c2mr->ibmr;
485
486err:
487 kfree(c2mr);
488 return ERR_PTR(err);
489}
490
491static int c2_dereg_mr(struct ib_mr *ib_mr)
492{
493 struct c2_mr *mr = to_c2mr(ib_mr);
494 int err;
495
496 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
497
498 err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
499 if (err)
500 pr_debug("c2_stag_dealloc failed: %d\n", err);
501 else
502 kfree(mr);
503
504 return err;
505}
506
507static ssize_t show_rev(struct class_device *cdev, char *buf)
508{
509 struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
510 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
511 return sprintf(buf, "%x\n", dev->props.hw_ver);
512}
513
514static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
515{
516 struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
517 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
518 return sprintf(buf, "%x.%x.%x\n",
519 (int) (dev->props.fw_ver >> 32),
520 (int) (dev->props.fw_ver >> 16) & 0xffff,
521 (int) (dev->props.fw_ver & 0xffff));
522}
523
524static ssize_t show_hca(struct class_device *cdev, char *buf)
525{
526 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
527 return sprintf(buf, "AMSO1100\n");
528}
529
530static ssize_t show_board(struct class_device *cdev, char *buf)
531{
532 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
533 return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
534}
535
536static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
537static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
538static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
539static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
540
541static struct class_device_attribute *c2_class_attributes[] = {
542 &class_device_attr_hw_rev,
543 &class_device_attr_fw_ver,
544 &class_device_attr_hca_type,
545 &class_device_attr_board_id
546};
547
548static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
549 int attr_mask, struct ib_udata *udata)
550{
551 int err;
552
553 err =
554 c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
555 attr_mask);
556
557 return err;
558}
559
560static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
561{
562 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
563 return -ENOSYS;
564}
565
566static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
567{
568 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
569 return -ENOSYS;
570}
571
572static int c2_process_mad(struct ib_device *ibdev,
573 int mad_flags,
574 u8 port_num,
575 struct ib_wc *in_wc,
576 struct ib_grh *in_grh,
577 struct ib_mad *in_mad, struct ib_mad *out_mad)
578{
579 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
580 return -ENOSYS;
581}
582
583static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
584{
585 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
586
587 /* Request a connection */
588 return c2_llp_connect(cm_id, iw_param);
589}
590
591static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
592{
593 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
594
595 /* Accept the new connection */
596 return c2_llp_accept(cm_id, iw_param);
597}
598
599static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
600{
601 int err;
602
603 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
604
605 err = c2_llp_reject(cm_id, pdata, pdata_len);
606 return err;
607}
608
609static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
610{
611 int err;
612
613 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
614 err = c2_llp_service_create(cm_id, backlog);
615 pr_debug("%s:%u err=%d\n",
616 __FUNCTION__, __LINE__,
617 err);
618 return err;
619}
620
621static int c2_service_destroy(struct iw_cm_id *cm_id)
622{
623 int err;
624 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
625
626 err = c2_llp_service_destroy(cm_id);
627
628 return err;
629}
630
631static int c2_pseudo_up(struct net_device *netdev)
632{
633 struct in_device *ind;
634 struct c2_dev *c2dev = netdev->priv;
635
636 ind = in_dev_get(netdev);
637 if (!ind)
638 return 0;
639
640 pr_debug("adding...\n");
641 for_ifa(ind) {
642#ifdef DEBUG
643 u8 *ip = (u8 *) & ifa->ifa_address;
644
645 pr_debug("%s: %d.%d.%d.%d\n",
646 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
647#endif
648 c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
649 }
650 endfor_ifa(ind);
651 in_dev_put(ind);
652
653 return 0;
654}
655
656static int c2_pseudo_down(struct net_device *netdev)
657{
658 struct in_device *ind;
659 struct c2_dev *c2dev = netdev->priv;
660
661 ind = in_dev_get(netdev);
662 if (!ind)
663 return 0;
664
665 pr_debug("deleting...\n");
666 for_ifa(ind) {
667#ifdef DEBUG
668 u8 *ip = (u8 *) & ifa->ifa_address;
669
670 pr_debug("%s: %d.%d.%d.%d\n",
671 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
672#endif
673 c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
674 }
675 endfor_ifa(ind);
676 in_dev_put(ind);
677
678 return 0;
679}
680
681static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
682{
683 kfree_skb(skb);
684 return NETDEV_TX_OK;
685}
686
687static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
688{
689 int ret = 0;
690
691 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
692 return -EINVAL;
693
694 netdev->mtu = new_mtu;
695
696	/* TODO: Tell rnic about new rdma interface mtu */
697 return ret;
698}
699
700static void setup(struct net_device *netdev)
701{
702 SET_MODULE_OWNER(netdev);
703 netdev->open = c2_pseudo_up;
704 netdev->stop = c2_pseudo_down;
705 netdev->hard_start_xmit = c2_pseudo_xmit_frame;
706 netdev->get_stats = NULL;
707 netdev->tx_timeout = NULL;
708 netdev->set_mac_address = NULL;
709 netdev->change_mtu = c2_pseudo_change_mtu;
710 netdev->watchdog_timeo = 0;
711 netdev->type = ARPHRD_ETHER;
712 netdev->mtu = 1500;
713 netdev->hard_header_len = ETH_HLEN;
714 netdev->addr_len = ETH_ALEN;
715 netdev->tx_queue_len = 0;
716 netdev->flags |= IFF_NOARP;
717 return;
718}
719
720static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
721{
722 char name[IFNAMSIZ];
723 struct net_device *netdev;
724
725 /* change ethxxx to iwxxx */
726 strcpy(name, "iw");
727 strcat(name, &c2dev->netdev->name[3]);
728 netdev = alloc_netdev(sizeof(*netdev), name, setup);
729 if (!netdev) {
730		printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
731 __FUNCTION__);
732 return NULL;
733 }
734
735 netdev->priv = c2dev;
736
737 SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
738
739 memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
740
741 /* Print out the MAC address */
742 pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n",
743 netdev->name,
744 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
745 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
746
747#if 0
748 /* Disable network packets */
749 netif_stop_queue(netdev);
750#endif
751 return netdev;
752}
753
754int c2_register_device(struct c2_dev *dev)
755{
756 int ret;
757 int i;
758
759 /* Register pseudo network device */
760 dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
761 if (dev->pseudo_netdev) {
762 ret = register_netdev(dev->pseudo_netdev);
763 if (ret) {
764 printk(KERN_ERR PFX
765 "Unable to register netdev, ret = %d\n", ret);
766 free_netdev(dev->pseudo_netdev);
767 return ret;
768 }
769 }
770
771 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
772 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
773 dev->ibdev.owner = THIS_MODULE;
774 dev->ibdev.uverbs_cmd_mask =
775 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
776 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
777 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
778 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
779 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
780 (1ull << IB_USER_VERBS_CMD_REG_MR) |
781 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
782 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
783 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
784 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
785 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
786 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
787 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
788 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
789 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
790 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
791 (1ull << IB_USER_VERBS_CMD_POST_RECV);
792
793 dev->ibdev.node_type = RDMA_NODE_RNIC;
794 memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
795 memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
796 dev->ibdev.phys_port_cnt = 1;
797 dev->ibdev.dma_device = &dev->pcidev->dev;
798 dev->ibdev.class_dev.dev = &dev->pcidev->dev;
799 dev->ibdev.query_device = c2_query_device;
800 dev->ibdev.query_port = c2_query_port;
801 dev->ibdev.modify_port = c2_modify_port;
802 dev->ibdev.query_pkey = c2_query_pkey;
803 dev->ibdev.query_gid = c2_query_gid;
804 dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
805 dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
806 dev->ibdev.mmap = c2_mmap_uar;
807 dev->ibdev.alloc_pd = c2_alloc_pd;
808 dev->ibdev.dealloc_pd = c2_dealloc_pd;
809 dev->ibdev.create_ah = c2_ah_create;
810 dev->ibdev.destroy_ah = c2_ah_destroy;
811 dev->ibdev.create_qp = c2_create_qp;
812 dev->ibdev.modify_qp = c2_modify_qp;
813 dev->ibdev.destroy_qp = c2_destroy_qp;
814 dev->ibdev.create_cq = c2_create_cq;
815 dev->ibdev.destroy_cq = c2_destroy_cq;
816 dev->ibdev.poll_cq = c2_poll_cq;
817 dev->ibdev.get_dma_mr = c2_get_dma_mr;
818 dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
819 dev->ibdev.reg_user_mr = c2_reg_user_mr;
820 dev->ibdev.dereg_mr = c2_dereg_mr;
821
822 dev->ibdev.alloc_fmr = NULL;
823 dev->ibdev.unmap_fmr = NULL;
824 dev->ibdev.dealloc_fmr = NULL;
825 dev->ibdev.map_phys_fmr = NULL;
826
827 dev->ibdev.attach_mcast = c2_multicast_attach;
828 dev->ibdev.detach_mcast = c2_multicast_detach;
829 dev->ibdev.process_mad = c2_process_mad;
830
831 dev->ibdev.req_notify_cq = c2_arm_cq;
832 dev->ibdev.post_send = c2_post_send;
833 dev->ibdev.post_recv = c2_post_receive;
834
835 dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
836 dev->ibdev.iwcm->add_ref = c2_add_ref;
837 dev->ibdev.iwcm->rem_ref = c2_rem_ref;
838 dev->ibdev.iwcm->get_qp = c2_get_qp;
839 dev->ibdev.iwcm->connect = c2_connect;
840 dev->ibdev.iwcm->accept = c2_accept;
841 dev->ibdev.iwcm->reject = c2_reject;
842 dev->ibdev.iwcm->create_listen = c2_service_create;
843 dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
844
845 ret = ib_register_device(&dev->ibdev);
846 if (ret)
847 return ret;
848
849 for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) {
850 ret = class_device_create_file(&dev->ibdev.class_dev,
851 c2_class_attributes[i]);
852 if (ret) {
853 unregister_netdev(dev->pseudo_netdev);
854 free_netdev(dev->pseudo_netdev);
855 ib_unregister_device(&dev->ibdev);
856 return ret;
857 }
858 }
859
860 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
861 return 0;
862}
863
864void c2_unregister_device(struct c2_dev *dev)
865{
866 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
867 unregister_netdev(dev->pseudo_netdev);
868 free_netdev(dev->pseudo_netdev);
869 ib_unregister_device(&dev->ibdev);
870}
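
One fragile spot in c2_register_device() above: the struct iw_cm_verbs table is allocated with kmalloc() and dereferenced immediately, so an allocation failure would oops instead of failing the registration. A minimal, hedged sketch of a guarded allocation (the unwind of the pseudo netdev shown here is illustrative and not taken from this patch):

	dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (!dev->ibdev.iwcm) {
		/* undo the pseudo netdev registration done earlier */
		if (dev->pseudo_netdev) {
			unregister_netdev(dev->pseudo_netdev);
			free_netdev(dev->pseudo_netdev);
		}
		return -ENOMEM;
	}
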
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
new file mode 100644
index 000000000000..fc906223220f
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#ifndef C2_PROVIDER_H
36#define C2_PROVIDER_H
37#include <linux/inetdevice.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_pack.h>
41
42#include "c2_mq.h"
43#include <rdma/iw_cm.h>
44
45#define C2_MPT_FLAG_ATOMIC (1 << 14)
46#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13)
47#define C2_MPT_FLAG_REMOTE_READ (1 << 12)
48#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11)
49#define C2_MPT_FLAG_LOCAL_READ (1 << 10)
50
51struct c2_buf_list {
52 void *buf;
53 DECLARE_PCI_UNMAP_ADDR(mapping)
54};
55
56
57/* The user context keeps track of objects allocated for a
58 * particular user-mode client. */
59struct c2_ucontext {
60 struct ib_ucontext ibucontext;
61};
62
63struct c2_mtt;
64
65/* All objects associated with a PD are kept in the
66 * associated user context if present.
67 */
68struct c2_pd {
69 struct ib_pd ibpd;
70 u32 pd_id;
71};
72
73struct c2_mr {
74 struct ib_mr ibmr;
75 struct c2_pd *pd;
76};
77
78struct c2_av;
79
80enum c2_ah_type {
81 C2_AH_ON_HCA,
82 C2_AH_PCI_POOL,
83 C2_AH_KMALLOC
84};
85
86struct c2_ah {
87 struct ib_ah ibah;
88};
89
90struct c2_cq {
91 struct ib_cq ibcq;
92 spinlock_t lock;
93 atomic_t refcount;
94 int cqn;
95 int is_kernel;
96 wait_queue_head_t wait;
97
98 u32 adapter_handle;
99 struct c2_mq mq;
100};
101
102struct c2_wq {
103 spinlock_t lock;
104};
105struct iw_cm_id;
106struct c2_qp {
107 struct ib_qp ibqp;
108 struct iw_cm_id *cm_id;
109 spinlock_t lock;
110 atomic_t refcount;
111 wait_queue_head_t wait;
112 int qpn;
113
114 u32 adapter_handle;
115 u32 send_sgl_depth;
116 u32 recv_sgl_depth;
117 u32 rdma_write_sgl_depth;
118 u8 state;
119
120 struct c2_mq sq_mq;
121 struct c2_mq rq_mq;
122};
123
124struct c2_cr_query_attrs {
125 u32 local_addr;
126 u32 remote_addr;
127 u16 local_port;
128 u16 remote_port;
129};
130
131static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
132{
133 return container_of(ibpd, struct c2_pd, ibpd);
134}
135
136static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
137{
138 return container_of(ibucontext, struct c2_ucontext, ibucontext);
139}
140
141static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
142{
143 return container_of(ibmr, struct c2_mr, ibmr);
144}
145
146
147static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
148{
149 return container_of(ibah, struct c2_ah, ibah);
150}
151
152static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
153{
154 return container_of(ibcq, struct c2_cq, ibcq);
155}
156
157static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
158{
159 return container_of(ibqp, struct c2_qp, ibqp);
160}
161
162static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
163{
164 struct in_device *ind;
165 int ret = 0;
166
167 ind = in_dev_get(netdev);
168 if (!ind)
169 return 0;
170
171 for_ifa(ind) {
172 if (ifa->ifa_address == addr) {
173 ret = 1;
174 break;
175 }
176 }
177 endfor_ifa(ind);
178 in_dev_put(ind);
179 return ret;
180}
181#endif /* C2_PROVIDER_H */
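
The to_c2*() helpers above are the usual container_of() idiom: every provider object embeds its ib_* counterpart, so the driver-private structure can be recovered from the pointer the IB midlayer passes back into the verbs entry points. A short sketch of how such an entry point typically uses them, mirroring c2_modify_qp() in c2_provider.c (the function name below is hypothetical):

	static int example_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				     int attr_mask)
	{
		struct c2_qp *qp = to_c2qp(ibqp);		/* ib_qp -> c2_qp */
		struct c2_dev *c2dev = to_c2dev(ibqp->device);	/* ib_device -> c2_dev */

		return c2_qp_modify(c2dev, qp, attr, attr_mask);
	}
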
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
new file mode 100644
index 000000000000..12261132b077
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -0,0 +1,975 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 */
37
38#include "c2.h"
39#include "c2_vq.h"
40#include "c2_status.h"
41
42#define C2_MAX_ORD_PER_QP 128
43#define C2_MAX_IRD_PER_QP 128
44
45#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
46#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
47#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
48
49#define NO_SUPPORT -1
50static const u8 c2_opcode[] = {
51 [IB_WR_SEND] = C2_WR_TYPE_SEND,
52 [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
53 [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
54 [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
55 [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
56 [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
57 [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
58};
59
60static int to_c2_state(enum ib_qp_state ib_state)
61{
62 switch (ib_state) {
63 case IB_QPS_RESET:
64 return C2_QP_STATE_IDLE;
65 case IB_QPS_RTS:
66 return C2_QP_STATE_RTS;
67 case IB_QPS_SQD:
68 return C2_QP_STATE_CLOSING;
69 case IB_QPS_SQE:
70 return C2_QP_STATE_CLOSING;
71 case IB_QPS_ERR:
72 return C2_QP_STATE_ERROR;
73 default:
74 return -1;
75 }
76}
77
78static int to_ib_state(enum c2_qp_state c2_state)
79{
80 switch (c2_state) {
81 case C2_QP_STATE_IDLE:
82 return IB_QPS_RESET;
83 case C2_QP_STATE_CONNECTING:
84 return IB_QPS_RTR;
85 case C2_QP_STATE_RTS:
86 return IB_QPS_RTS;
87 case C2_QP_STATE_CLOSING:
88 return IB_QPS_SQD;
89 case C2_QP_STATE_ERROR:
90 return IB_QPS_ERR;
91 case C2_QP_STATE_TERMINATE:
92 return IB_QPS_SQE;
93 default:
94 return -1;
95 }
96}
97
98static const char *to_ib_state_str(int ib_state)
99{
100 static const char *state_str[] = {
101 "IB_QPS_RESET",
102 "IB_QPS_INIT",
103 "IB_QPS_RTR",
104 "IB_QPS_RTS",
105 "IB_QPS_SQD",
106 "IB_QPS_SQE",
107 "IB_QPS_ERR"
108 };
109 if (ib_state < IB_QPS_RESET ||
110 ib_state > IB_QPS_ERR)
111 return "<invalid IB QP state>";
112
113 ib_state -= IB_QPS_RESET;
114 return state_str[ib_state];
115}
116
117void c2_set_qp_state(struct c2_qp *qp, int c2_state)
118{
119 int new_state = to_ib_state(c2_state);
120
121 pr_debug("%s: qp[%p] state modify %s --> %s\n",
122 __FUNCTION__,
123 qp,
124 to_ib_state_str(qp->state),
125 to_ib_state_str(new_state));
126 qp->state = new_state;
127}
128
129#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
130
131int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
132 struct ib_qp_attr *attr, int attr_mask)
133{
134 struct c2wr_qp_modify_req wr;
135 struct c2wr_qp_modify_rep *reply;
136 struct c2_vq_req *vq_req;
137 unsigned long flags;
138 u8 next_state;
139 int err;
140
141 pr_debug("%s:%d qp=%p, %s --> %s\n",
142 __FUNCTION__, __LINE__,
143 qp,
144 to_ib_state_str(qp->state),
145 to_ib_state_str(attr->qp_state));
146
147 vq_req = vq_req_alloc(c2dev);
148 if (!vq_req)
149 return -ENOMEM;
150
151 c2_wr_set_id(&wr, CCWR_QP_MODIFY);
152 wr.hdr.context = (unsigned long) vq_req;
153 wr.rnic_handle = c2dev->adapter_handle;
154 wr.qp_handle = qp->adapter_handle;
155 wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
156 wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
157 wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
158 wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
159
160 if (attr_mask & IB_QP_STATE) {
161 /* Ensure the state is valid */
162 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
163 return -EINVAL;
164
165 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
166
167 if (attr->qp_state == IB_QPS_ERR) {
168 spin_lock_irqsave(&qp->lock, flags);
169 if (qp->cm_id && qp->state == IB_QPS_RTS) {
170 pr_debug("Generating CLOSE event for QP-->ERR, "
171				 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
172			/* Generate a CLOSE event */
173 vq_req->cm_id = qp->cm_id;
174 vq_req->event = IW_CM_EVENT_CLOSE;
175 }
176 spin_unlock_irqrestore(&qp->lock, flags);
177 }
178 next_state = attr->qp_state;
179
180 } else if (attr_mask & IB_QP_CUR_STATE) {
181
182 if (attr->cur_qp_state != IB_QPS_RTR &&
183 attr->cur_qp_state != IB_QPS_RTS &&
184 attr->cur_qp_state != IB_QPS_SQD &&
185 attr->cur_qp_state != IB_QPS_SQE)
186 return -EINVAL;
187 else
188 wr.next_qp_state =
189 cpu_to_be32(to_c2_state(attr->cur_qp_state));
190
191 next_state = attr->cur_qp_state;
192
193 } else {
194 err = 0;
195 goto bail0;
196 }
197
198 /* reference the request struct */
199 vq_req_get(c2dev, vq_req);
200
201 err = vq_send_wr(c2dev, (union c2wr *) & wr);
202 if (err) {
203 vq_req_put(c2dev, vq_req);
204 goto bail0;
205 }
206
207 err = vq_wait_for_reply(c2dev, vq_req);
208 if (err)
209 goto bail0;
210
211 reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
212 if (!reply) {
213 err = -ENOMEM;
214 goto bail0;
215 }
216
217 err = c2_errno(reply);
218 if (!err)
219 qp->state = next_state;
220#ifdef DEBUG
221 else
222 pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
223#endif
224 /*
225	 * If we are transitioning to the error state and generated the
226	 * close event here, then we need to drop the reference because
227	 * the adapter will not generate a close event of its own.
228 */
229 spin_lock_irqsave(&qp->lock, flags);
230 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
231 qp->cm_id->rem_ref(qp->cm_id);
232 qp->cm_id = NULL;
233 }
234 spin_unlock_irqrestore(&qp->lock, flags);
235
236 vq_repbuf_free(c2dev, reply);
237 bail0:
238 vq_req_free(c2dev, vq_req);
239
240 pr_debug("%s:%d qp=%p, cur_state=%s\n",
241 __FUNCTION__, __LINE__,
242 qp,
243 to_ib_state_str(qp->state));
244 return err;
245}
246
247int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
248 int ord, int ird)
249{
250 struct c2wr_qp_modify_req wr;
251 struct c2wr_qp_modify_rep *reply;
252 struct c2_vq_req *vq_req;
253 int err;
254
255 vq_req = vq_req_alloc(c2dev);
256 if (!vq_req)
257 return -ENOMEM;
258
259 c2_wr_set_id(&wr, CCWR_QP_MODIFY);
260 wr.hdr.context = (unsigned long) vq_req;
261 wr.rnic_handle = c2dev->adapter_handle;
262 wr.qp_handle = qp->adapter_handle;
263 wr.ord = cpu_to_be32(ord);
264 wr.ird = cpu_to_be32(ird);
265 wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
266 wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
267 wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
268
269 /* reference the request struct */
270 vq_req_get(c2dev, vq_req);
271
272 err = vq_send_wr(c2dev, (union c2wr *) & wr);
273 if (err) {
274 vq_req_put(c2dev, vq_req);
275 goto bail0;
276 }
277
278 err = vq_wait_for_reply(c2dev, vq_req);
279 if (err)
280 goto bail0;
281
282 reply = (struct c2wr_qp_modify_rep *) (unsigned long)
283 vq_req->reply_msg;
284 if (!reply) {
285 err = -ENOMEM;
286 goto bail0;
287 }
288
289 err = c2_errno(reply);
290 vq_repbuf_free(c2dev, reply);
291 bail0:
292 vq_req_free(c2dev, vq_req);
293 return err;
294}
295
296static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
297{
298 struct c2_vq_req *vq_req;
299 struct c2wr_qp_destroy_req wr;
300 struct c2wr_qp_destroy_rep *reply;
301 unsigned long flags;
302 int err;
303
304 /*
305 * Allocate a verb request message
306 */
307 vq_req = vq_req_alloc(c2dev);
308 if (!vq_req) {
309 return -ENOMEM;
310 }
311
312 /*
313 * Initialize the WR
314 */
315 c2_wr_set_id(&wr, CCWR_QP_DESTROY);
316 wr.hdr.context = (unsigned long) vq_req;
317 wr.rnic_handle = c2dev->adapter_handle;
318 wr.qp_handle = qp->adapter_handle;
319
320 /*
321 * reference the request struct. dereferenced in the int handler.
322 */
323 vq_req_get(c2dev, vq_req);
324
325 spin_lock_irqsave(&qp->lock, flags);
326 if (qp->cm_id && qp->state == IB_QPS_RTS) {
327 pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
328			 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
329		/* Generate a CLOSE event */
330 vq_req->qp = qp;
331 vq_req->cm_id = qp->cm_id;
332 vq_req->event = IW_CM_EVENT_CLOSE;
333 }
334 spin_unlock_irqrestore(&qp->lock, flags);
335
336 /*
337 * Send WR to adapter
338 */
339 err = vq_send_wr(c2dev, (union c2wr *) & wr);
340 if (err) {
341 vq_req_put(c2dev, vq_req);
342 goto bail0;
343 }
344
345 /*
346 * Wait for reply from adapter
347 */
348 err = vq_wait_for_reply(c2dev, vq_req);
349 if (err) {
350 goto bail0;
351 }
352
353 /*
354 * Process reply
355 */
356 reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
357 if (!reply) {
358 err = -ENOMEM;
359 goto bail0;
360 }
361
362 spin_lock_irqsave(&qp->lock, flags);
363 if (qp->cm_id) {
364 qp->cm_id->rem_ref(qp->cm_id);
365 qp->cm_id = NULL;
366 }
367 spin_unlock_irqrestore(&qp->lock, flags);
368
369 vq_repbuf_free(c2dev, reply);
370 bail0:
371 vq_req_free(c2dev, vq_req);
372 return err;
373}
374
375static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
376{
377 int ret;
378
379 do {
380 spin_lock_irq(&c2dev->qp_table.lock);
381 ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
382 c2dev->qp_table.last++, &qp->qpn);
383 spin_unlock_irq(&c2dev->qp_table.lock);
384 } while ((ret == -EAGAIN) &&
385 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
386 return ret;
387}
388
389static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
390{
391 spin_lock_irq(&c2dev->qp_table.lock);
392 idr_remove(&c2dev->qp_table.idr, qpn);
393 spin_unlock_irq(&c2dev->qp_table.lock);
394}
395
396struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
397{
398 unsigned long flags;
399 struct c2_qp *qp;
400
401 spin_lock_irqsave(&c2dev->qp_table.lock, flags);
402 qp = idr_find(&c2dev->qp_table.idr, qpn);
403 spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
404 return qp;
405}
406
407int c2_alloc_qp(struct c2_dev *c2dev,
408 struct c2_pd *pd,
409 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
410{
411 struct c2wr_qp_create_req wr;
412 struct c2wr_qp_create_rep *reply;
413 struct c2_vq_req *vq_req;
414 struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
415 struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
416 unsigned long peer_pa;
417 u32 q_size, msg_size, mmap_size;
418 void __iomem *mmap;
419 int err;
420
421 err = c2_alloc_qpn(c2dev, qp);
422 if (err)
423 return err;
424 qp->ibqp.qp_num = qp->qpn;
425 qp->ibqp.qp_type = IB_QPT_RC;
426
427 /* Allocate the SQ and RQ shared pointers */
428 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
429 &qp->sq_mq.shared_dma, GFP_KERNEL);
430 if (!qp->sq_mq.shared) {
431 err = -ENOMEM;
432 goto bail0;
433 }
434
435 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
436 &qp->rq_mq.shared_dma, GFP_KERNEL);
437 if (!qp->rq_mq.shared) {
438 err = -ENOMEM;
439 goto bail1;
440 }
441
442 /* Allocate the verbs request */
443 vq_req = vq_req_alloc(c2dev);
444 if (vq_req == NULL) {
445 err = -ENOMEM;
446 goto bail2;
447 }
448
449 /* Initialize the work request */
450 memset(&wr, 0, sizeof(wr));
451 c2_wr_set_id(&wr, CCWR_QP_CREATE);
452 wr.hdr.context = (unsigned long) vq_req;
453 wr.rnic_handle = c2dev->adapter_handle;
454 wr.sq_cq_handle = send_cq->adapter_handle;
455 wr.rq_cq_handle = recv_cq->adapter_handle;
456 wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
457 wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
458 wr.srq_handle = 0;
459 wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
460 QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
461 wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
462 wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
463 wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
464 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
465 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
466 wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
467 wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
468 wr.pd_id = pd->pd_id;
469 wr.user_context = (unsigned long) qp;
470
471 vq_req_get(c2dev, vq_req);
472
473 /* Send the WR to the adapter */
474 err = vq_send_wr(c2dev, (union c2wr *) & wr);
475 if (err) {
476 vq_req_put(c2dev, vq_req);
477 goto bail3;
478 }
479
480 /* Wait for the verb reply */
481 err = vq_wait_for_reply(c2dev, vq_req);
482 if (err) {
483 goto bail3;
484 }
485
486 /* Process the reply */
487 reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
488 if (!reply) {
489 err = -ENOMEM;
490 goto bail3;
491 }
492
493 if ((err = c2_wr_get_result(reply)) != 0) {
494 goto bail4;
495 }
496
497 /* Fill in the kernel QP struct */
498 atomic_set(&qp->refcount, 1);
499 qp->adapter_handle = reply->qp_handle;
500 qp->state = IB_QPS_RESET;
501 qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
502 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
503 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
504
505 /* Initialize the SQ MQ */
506 q_size = be32_to_cpu(reply->sq_depth);
507 msg_size = be32_to_cpu(reply->sq_msg_size);
508 peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
509 mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
510 mmap = ioremap_nocache(peer_pa, mmap_size);
511 if (!mmap) {
512 err = -ENOMEM;
513 goto bail5;
514 }
515
516 c2_mq_req_init(&qp->sq_mq,
517 be32_to_cpu(reply->sq_mq_index),
518 q_size,
519 msg_size,
520 mmap + sizeof(struct c2_mq_shared), /* pool start */
521 mmap, /* peer */
522 C2_MQ_ADAPTER_TARGET);
523
524 /* Initialize the RQ mq */
525 q_size = be32_to_cpu(reply->rq_depth);
526 msg_size = be32_to_cpu(reply->rq_msg_size);
527 peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
528 mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
529 mmap = ioremap_nocache(peer_pa, mmap_size);
530 if (!mmap) {
531 err = -ENOMEM;
532 goto bail6;
533 }
534
535 c2_mq_req_init(&qp->rq_mq,
536 be32_to_cpu(reply->rq_mq_index),
537 q_size,
538 msg_size,
539 mmap + sizeof(struct c2_mq_shared), /* pool start */
540 mmap, /* peer */
541 C2_MQ_ADAPTER_TARGET);
542
543 vq_repbuf_free(c2dev, reply);
544 vq_req_free(c2dev, vq_req);
545
546 return 0;
547
548 bail6:
549 iounmap(qp->sq_mq.peer);
550 bail5:
551 destroy_qp(c2dev, qp);
552 bail4:
553 vq_repbuf_free(c2dev, reply);
554 bail3:
555 vq_req_free(c2dev, vq_req);
556 bail2:
557 c2_free_mqsp(qp->rq_mq.shared);
558 bail1:
559 c2_free_mqsp(qp->sq_mq.shared);
560 bail0:
561 c2_free_qpn(c2dev, qp->qpn);
562 return err;
563}
564
565void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
566{
567 struct c2_cq *send_cq;
568 struct c2_cq *recv_cq;
569
570 send_cq = to_c2cq(qp->ibqp.send_cq);
571 recv_cq = to_c2cq(qp->ibqp.recv_cq);
572
573 /*
574 * Lock CQs here, so that CQ polling code can do QP lookup
575 * without taking a lock.
576 */
577 spin_lock_irq(&send_cq->lock);
578 if (send_cq != recv_cq)
579 spin_lock(&recv_cq->lock);
580
581 c2_free_qpn(c2dev, qp->qpn);
582
583 if (send_cq != recv_cq)
584 spin_unlock(&recv_cq->lock);
585 spin_unlock_irq(&send_cq->lock);
586
587 /*
588	 * Destroy the qp in the rnic...
589 */
590 destroy_qp(c2dev, qp);
591
592 /*
593 * Mark any unreaped CQEs as null and void.
594 */
595 c2_cq_clean(c2dev, qp, send_cq->cqn);
596 if (send_cq != recv_cq)
597 c2_cq_clean(c2dev, qp, recv_cq->cqn);
598 /*
599 * Unmap the MQs and return the shared pointers
600 * to the message pool.
601 */
602 iounmap(qp->sq_mq.peer);
603 iounmap(qp->rq_mq.peer);
604 c2_free_mqsp(qp->sq_mq.shared);
605 c2_free_mqsp(qp->rq_mq.shared);
606
607 atomic_dec(&qp->refcount);
608 wait_event(qp->wait, !atomic_read(&qp->refcount));
609}
610
611/*
612 * Function: move_sgl
613 *
614 * Description:
615 * Move an SGL from the user's work request struct into a CCIL Work Request
616 * message, swapping to WR byte order and ensuring the total length doesn't
617 * overflow.
618 *
619 * IN:
620 * dst - ptr to CCIL Work Request message SGL memory.
621 * src - ptr to the consumer's SGL memory.
622 *
623 * OUT: none
624 *
625 * Return:
626 * CCIL status codes.
627 */
628static int
629move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
630 u8 * actual_count)
631{
632 u32 tot = 0; /* running total */
633 u8 acount = 0; /* running total non-0 len sge's */
634
635 while (count > 0) {
636 /*
637 * If the addition of this SGE causes the
638 * total SGL length to exceed 2^32-1, then
639 * fail-n-bail.
640 *
641 * If the current total plus the next element length
642 * wraps, then it will go negative and be less than the
643 * current total...
644 */
645 if ((tot + src->length) < tot) {
646 return -EINVAL;
647 }
648 /*
649 * Bug: 1456 (as well as 1498 & 1643)
650 * Skip over any sge's supplied with len=0
651 */
652 if (src->length) {
653 tot += src->length;
654 dst->stag = cpu_to_be32(src->lkey);
655 dst->to = cpu_to_be64(src->addr);
656 dst->length = cpu_to_be32(src->length);
657 dst++;
658 acount++;
659 }
660 src++;
661 count--;
662 }
663
664 if (acount == 0) {
665 /*
666 * Bug: 1476 (as well as 1498, 1456 and 1643)
667 * Setup the SGL in the WR to make it easier for the RNIC.
668 * This way, the FW doesn't have to deal with special cases.
669 * Setting length=0 should be sufficient.
670 */
671 dst->stag = 0;
672 dst->to = 0;
673 dst->length = 0;
674 }
675
676 *p_len = tot;
677 *actual_count = acount;
678 return 0;
679}
680
681/*
682 * Function: c2_activity (private function)
683 *
684 * Description:
685 * Post an mq index to the host->adapter activity fifo.
686 *
687 * IN:
688 * c2dev - ptr to c2dev structure
689 * mq_index - mq index to post
690 * shared - value most recently written to shared
691 *
692 * OUT:
693 *
694 * Return:
695 * none
696 */
697static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
698{
699 /*
700 * First read the register to see if the FIFO is full, and if so,
701 * spin until it's not. This isn't perfect -- there is no
702 * synchronization among the clients of the register, but in
703	 * practice it prevents multiple CPUs from hammering the bus
704 * with PCI RETRY. Note that when this does happen, the card
705 * cannot get on the bus and the card and system hang in a
706 * deadlock -- thus the need for this code. [TOT]
707 */
708 while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
709 set_current_state(TASK_UNINTERRUPTIBLE);
710 schedule_timeout(0);
711 }
712
713 __raw_writel(C2_HINT_MAKE(mq_index, shared),
714 c2dev->regs + PCI_BAR0_ADAPTER_HINT);
715}
716
717/*
718 * Function: qp_wr_post
719 *
720 * Description:
721 * This in-line function allocates a MQ msg, then moves the host-copy of
722 * the completed WR into msg. Then it posts the message.
723 *
724 * IN:
725 * q - ptr to user MQ.
726 * wr - ptr to host-copy of the WR.
727 * qp - ptr to user qp
728 * size - Number of bytes to post. Assumed to be divisible by 4.
729 *
730 * OUT: none
731 *
732 * Return:
733 * CCIL status codes.
734 */
735static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
736{
737 union c2wr *msg;
738
739 msg = c2_mq_alloc(q);
740 if (msg == NULL) {
741 return -EINVAL;
742 }
743#ifdef CCMSGMAGIC
744 ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
745#endif
746
747 /*
748 * Since all header fields in the WR are the same as the
749 * CQE, set the following so the adapter need not.
750 */
751 c2_wr_set_result(wr, CCERR_PENDING);
752
753 /*
754 * Copy the wr down to the adapter
755 */
756 memcpy((void *) msg, (void *) wr, size);
757
758 c2_mq_produce(q);
759 return 0;
760}
761
762
763int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
764 struct ib_send_wr **bad_wr)
765{
766 struct c2_dev *c2dev = to_c2dev(ibqp->device);
767 struct c2_qp *qp = to_c2qp(ibqp);
768 union c2wr wr;
769 int err = 0;
770
771 u32 flags;
772 u32 tot_len;
773 u8 actual_sge_count;
774 u32 msg_size;
775
776 if (qp->state > IB_QPS_RTS)
777 return -EINVAL;
778
779 while (ib_wr) {
780
781 flags = 0;
782 wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
783 if (ib_wr->send_flags & IB_SEND_SIGNALED) {
784 flags |= SQ_SIGNALED;
785 }
786
787 switch (ib_wr->opcode) {
788 case IB_WR_SEND:
789 if (ib_wr->send_flags & IB_SEND_SOLICITED) {
790 c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
791 msg_size = sizeof(struct c2wr_send_req);
792 } else {
793 c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
794 msg_size = sizeof(struct c2wr_send_req);
795 }
796
797 wr.sqwr.send.remote_stag = 0;
798 msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
799 if (ib_wr->num_sge > qp->send_sgl_depth) {
800 err = -EINVAL;
801 break;
802 }
803 if (ib_wr->send_flags & IB_SEND_FENCE) {
804 flags |= SQ_READ_FENCE;
805 }
806 err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
807 ib_wr->sg_list,
808 ib_wr->num_sge,
809 &tot_len, &actual_sge_count);
810 wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
811 c2_wr_set_sge_count(&wr, actual_sge_count);
812 break;
813 case IB_WR_RDMA_WRITE:
814 c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
815 msg_size = sizeof(struct c2wr_rdma_write_req) +
816 (sizeof(struct c2_data_addr) * ib_wr->num_sge);
817 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
818 err = -EINVAL;
819 break;
820 }
821 if (ib_wr->send_flags & IB_SEND_FENCE) {
822 flags |= SQ_READ_FENCE;
823 }
824 wr.sqwr.rdma_write.remote_stag =
825 cpu_to_be32(ib_wr->wr.rdma.rkey);
826 wr.sqwr.rdma_write.remote_to =
827 cpu_to_be64(ib_wr->wr.rdma.remote_addr);
828 err = move_sgl((struct c2_data_addr *)
829 & (wr.sqwr.rdma_write.data),
830 ib_wr->sg_list,
831 ib_wr->num_sge,
832 &tot_len, &actual_sge_count);
833 wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
834 c2_wr_set_sge_count(&wr, actual_sge_count);
835 break;
836 case IB_WR_RDMA_READ:
837 c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
838 msg_size = sizeof(struct c2wr_rdma_read_req);
839
840			/* iWARP only supports 1 sge for RDMA reads */
841 if (ib_wr->num_sge > 1) {
842 err = -EINVAL;
843 break;
844 }
845
846 /*
847 * Move the local and remote stag/to/len into the WR.
848 */
849 wr.sqwr.rdma_read.local_stag =
850 cpu_to_be32(ib_wr->sg_list->lkey);
851 wr.sqwr.rdma_read.local_to =
852 cpu_to_be64(ib_wr->sg_list->addr);
853 wr.sqwr.rdma_read.remote_stag =
854 cpu_to_be32(ib_wr->wr.rdma.rkey);
855 wr.sqwr.rdma_read.remote_to =
856 cpu_to_be64(ib_wr->wr.rdma.remote_addr);
857 wr.sqwr.rdma_read.length =
858 cpu_to_be32(ib_wr->sg_list->length);
859 break;
860 default:
861 /* error */
862 msg_size = 0;
863 err = -EINVAL;
864 break;
865 }
866
867 /*
868 * If we had an error on the last wr build, then
869 * break out. Possible errors include bogus WR
870 * type, and a bogus SGL length...
871 */
872 if (err) {
873 break;
874 }
875
876 /*
877 * Store flags
878 */
879 c2_wr_set_flags(&wr, flags);
880
881 /*
882 * Post the puppy!
883 */
884 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
885 if (err) {
886 break;
887 }
888
889 /*
890 * Enqueue mq index to activity FIFO.
891 */
892 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
893
894 ib_wr = ib_wr->next;
895 }
896
897 if (err)
898 *bad_wr = ib_wr;
899 return err;
900}
901
902int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
903 struct ib_recv_wr **bad_wr)
904{
905 struct c2_dev *c2dev = to_c2dev(ibqp->device);
906 struct c2_qp *qp = to_c2qp(ibqp);
907 union c2wr wr;
908 int err = 0;
909
910 if (qp->state > IB_QPS_RTS)
911 return -EINVAL;
912
913 /*
914 * Try and post each work request
915 */
916 while (ib_wr) {
917 u32 tot_len;
918 u8 actual_sge_count;
919
920 if (ib_wr->num_sge > qp->recv_sgl_depth) {
921 err = -EINVAL;
922 break;
923 }
924
925 /*
926 * Create local host-copy of the WR
927 */
928 wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
929 c2_wr_set_id(&wr, CCWR_RECV);
930 c2_wr_set_flags(&wr, 0);
931
932 /* sge_count is limited to eight bits. */
933 BUG_ON(ib_wr->num_sge >= 256);
934 err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
935 ib_wr->sg_list,
936 ib_wr->num_sge, &tot_len, &actual_sge_count);
937 c2_wr_set_sge_count(&wr, actual_sge_count);
938
939 /*
940 * If we had an error on the last wr build, then
941 * break out. Possible errors include bogus WR
942 * type, and a bogus SGL length...
943 */
944 if (err) {
945 break;
946 }
947
948 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
949 if (err) {
950 break;
951 }
952
953 /*
954 * Enqueue mq index to activity FIFO
955 */
956 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
957
958 ib_wr = ib_wr->next;
959 }
960
961 if (err)
962 *bad_wr = ib_wr;
963 return err;
964}
965
966void __devinit c2_init_qp_table(struct c2_dev *c2dev)
967{
968 spin_lock_init(&c2dev->qp_table.lock);
969 idr_init(&c2dev->qp_table.idr);
970}
971
972void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
973{
974 idr_destroy(&c2dev->qp_table.idr);
975}
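
A detail worth noting from move_sgl() above: the total-length check relies on unsigned wrap-around: if adding the next SGE length would push the 32-bit running total past 2^32-1, the sum wraps and becomes smaller than the total, which is exactly what the comparison catches. A tiny illustration of the same test with made-up values:

	u32 tot  = 0xFFFFFFF0;	/* running total near the 32-bit limit */
	u32 next = 0x20;	/* length of the next SGE */

	if (tot + next < tot)	/* wrapped: 0xFFFFFFF0 + 0x20 truncates to 0x10 */
		return -EINVAL;	/* reject the SGL, as move_sgl() does */
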
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
new file mode 100644
index 000000000000..f49a32b7a8f6
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -0,0 +1,664 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35
36#include <linux/module.h>
37#include <linux/moduleparam.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/delay.h>
42#include <linux/ethtool.h>
43#include <linux/mii.h>
44#include <linux/if_vlan.h>
45#include <linux/crc32.h>
46#include <linux/in.h>
47#include <linux/ip.h>
48#include <linux/tcp.h>
49#include <linux/init.h>
50#include <linux/dma-mapping.h>
51#include <linux/mm.h>
52#include <linux/inet.h>
53#include <linux/vmalloc.h>
54
55#include <linux/route.h>
56
57#include <asm/io.h>
58#include <asm/irq.h>
59#include <asm/byteorder.h>
60#include <rdma/ib_smi.h>
61#include "c2.h"
62#include "c2_vq.h"
63
64/* Device capabilities */
65#define C2_MIN_PAGESIZE 1024
66
67#define C2_MAX_MRS 32768
68#define C2_MAX_QPS 16000
69#define C2_MAX_WQE_SZ 256
70#define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ)
71#define C2_MAX_SGES 4
72#define C2_MAX_SGE_RD 1
73#define C2_MAX_CQS 32768
74#define C2_MAX_CQES 4096
75#define C2_MAX_PDS 16384
76
77/*
78 * Send the adapter INIT message to the amso1100
79 */
80static int c2_adapter_init(struct c2_dev *c2dev)
81{
82 struct c2wr_init_req wr;
83 int err;
84
85 memset(&wr, 0, sizeof(wr));
86 c2_wr_set_id(&wr, CCWR_INIT);
87 wr.hdr.context = 0;
88 wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
89 wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
90 wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
91 wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
92 wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
93 wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
94
95 /* Post the init message */
96 err = vq_send_wr(c2dev, (union c2wr *) & wr);
97
98 return err;
99}
100
101/*
102 * Send the adapter TERM message to the amso1100
103 */
104static void c2_adapter_term(struct c2_dev *c2dev)
105{
106 struct c2wr_init_req wr;
107
108 memset(&wr, 0, sizeof(wr));
109 c2_wr_set_id(&wr, CCWR_TERM);
110 wr.hdr.context = 0;
111
112	/* Post the term message */
113 vq_send_wr(c2dev, (union c2wr *) & wr);
114 c2dev->init = 0;
115
116 return;
117}
118
119/*
120 * Query the adapter
121 */
122static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
123{
124 struct c2_vq_req *vq_req;
125 struct c2wr_rnic_query_req wr;
126 struct c2wr_rnic_query_rep *reply;
127 int err;
128
129 vq_req = vq_req_alloc(c2dev);
130 if (!vq_req)
131 return -ENOMEM;
132
133 c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
134 wr.hdr.context = (unsigned long) vq_req;
135 wr.rnic_handle = c2dev->adapter_handle;
136
137 vq_req_get(c2dev, vq_req);
138
139 err = vq_send_wr(c2dev, (union c2wr *) &wr);
140 if (err) {
141 vq_req_put(c2dev, vq_req);
142 goto bail1;
143 }
144
145 err = vq_wait_for_reply(c2dev, vq_req);
146 if (err)
147 goto bail1;
148
149 reply =
150 (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
151	if (!reply) {
152		err = -ENOMEM; goto bail1;
153	}
154	err = c2_errno(reply);
155 if (err)
156 goto bail2;
157
158 props->fw_ver =
159 ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
160	    ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
161	    (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
162 memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
163 props->max_mr_size = 0xFFFFFFFF;
164 props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
165 props->vendor_id = be32_to_cpu(reply->vendor_id);
166 props->vendor_part_id = be32_to_cpu(reply->part_number);
167 props->hw_ver = be32_to_cpu(reply->hw_version);
168 props->max_qp = be32_to_cpu(reply->max_qps);
169 props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
170 props->device_cap_flags = c2dev->device_cap_flags;
171 props->max_sge = C2_MAX_SGES;
172 props->max_sge_rd = C2_MAX_SGE_RD;
173 props->max_cq = be32_to_cpu(reply->max_cqs);
174 props->max_cqe = be32_to_cpu(reply->max_cq_depth);
175 props->max_mr = be32_to_cpu(reply->max_mrs);
176 props->max_pd = be32_to_cpu(reply->max_pds);
177 props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird);
178 props->max_ee_rd_atom = 0;
179 props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird);
180 props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
181 props->max_ee_init_rd_atom = 0;
182 props->atomic_cap = IB_ATOMIC_NONE;
183 props->max_ee = 0;
184 props->max_rdd = 0;
185 props->max_mw = be32_to_cpu(reply->max_mws);
186 props->max_raw_ipv6_qp = 0;
187 props->max_raw_ethy_qp = 0;
188 props->max_mcast_grp = 0;
189 props->max_mcast_qp_attach = 0;
190 props->max_total_mcast_qp_attach = 0;
191 props->max_ah = 0;
192 props->max_fmr = 0;
193 props->max_map_per_fmr = 0;
194 props->max_srq = 0;
195 props->max_srq_wr = 0;
196 props->max_srq_sge = 0;
197 props->max_pkeys = 0;
198 props->local_ca_ack_delay = 0;
199
200 bail2:
201 vq_repbuf_free(c2dev, reply);
202
203 bail1:
204 vq_req_free(c2dev, vq_req);
205 return err;
206}
207
208/*
209 * Add an IP address to the RNIC interface
210 */
211int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
212{
213 struct c2_vq_req *vq_req;
214 struct c2wr_rnic_setconfig_req *wr;
215 struct c2wr_rnic_setconfig_rep *reply;
216 struct c2_netaddr netaddr;
217 int err, len;
218
219 vq_req = vq_req_alloc(c2dev);
220 if (!vq_req)
221 return -ENOMEM;
222
223 len = sizeof(struct c2_netaddr);
224 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
225 if (!wr) {
226 err = -ENOMEM;
227 goto bail0;
228 }
229
230 c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
231 wr->hdr.context = (unsigned long) vq_req;
232 wr->rnic_handle = c2dev->adapter_handle;
233 wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
234
235 netaddr.ip_addr = inaddr;
236 netaddr.netmask = inmask;
237 netaddr.mtu = 0;
238
239 memcpy(wr->data, &netaddr, len);
240
241 vq_req_get(c2dev, vq_req);
242
243 err = vq_send_wr(c2dev, (union c2wr *) wr);
244 if (err) {
245 vq_req_put(c2dev, vq_req);
246 goto bail1;
247 }
248
249 err = vq_wait_for_reply(c2dev, vq_req);
250 if (err)
251 goto bail1;
252
253 reply =
254 (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
255 if (!reply) {
256 err = -ENOMEM;
257 goto bail1;
258 }
259
260 err = c2_errno(reply);
261 vq_repbuf_free(c2dev, reply);
262
263 bail1:
264 kfree(wr);
265 bail0:
266 vq_req_free(c2dev, vq_req);
267 return err;
268}
269
270/*
271 * Delete an IP address from the RNIC interface
272 */
273int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
274{
275 struct c2_vq_req *vq_req;
276 struct c2wr_rnic_setconfig_req *wr;
277 struct c2wr_rnic_setconfig_rep *reply;
278 struct c2_netaddr netaddr;
279 int err, len;
280
281 vq_req = vq_req_alloc(c2dev);
282 if (!vq_req)
283 return -ENOMEM;
284
285 len = sizeof(struct c2_netaddr);
286 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
287 if (!wr) {
288 err = -ENOMEM;
289 goto bail0;
290 }
291
292 c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
293 wr->hdr.context = (unsigned long) vq_req;
294 wr->rnic_handle = c2dev->adapter_handle;
295 wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
296
297 netaddr.ip_addr = inaddr;
298 netaddr.netmask = inmask;
299 netaddr.mtu = 0;
300
301 memcpy(wr->data, &netaddr, len);
302
303 vq_req_get(c2dev, vq_req);
304
305 err = vq_send_wr(c2dev, (union c2wr *) wr);
306 if (err) {
307 vq_req_put(c2dev, vq_req);
308 goto bail1;
309 }
310
311 err = vq_wait_for_reply(c2dev, vq_req);
312 if (err)
313 goto bail1;
314
315 reply =
316 (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
317 if (!reply) {
318 err = -ENOMEM;
319 goto bail1;
320 }
321
322 err = c2_errno(reply);
323 vq_repbuf_free(c2dev, reply);
324
325 bail1:
326 kfree(wr);
327 bail0:
328 vq_req_free(c2dev, vq_req);
329 return err;
330}
331
332/*
333 * Open a single RNIC instance to use with all
334 * low level openib calls
335 */
336static int c2_rnic_open(struct c2_dev *c2dev)
337{
338 struct c2_vq_req *vq_req;
339 union c2wr wr;
340 struct c2wr_rnic_open_rep *reply;
341 int err;
342
343 vq_req = vq_req_alloc(c2dev);
344 if (vq_req == NULL) {
345 return -ENOMEM;
346 }
347
348 memset(&wr, 0, sizeof(wr));
349 c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
350 wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
351 wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
352 wr.rnic_open.req.port_num = cpu_to_be16(0);
353 wr.rnic_open.req.user_context = (unsigned long) c2dev;
354
355 vq_req_get(c2dev, vq_req);
356
357 err = vq_send_wr(c2dev, &wr);
358 if (err) {
359 vq_req_put(c2dev, vq_req);
360 goto bail0;
361 }
362
363 err = vq_wait_for_reply(c2dev, vq_req);
364 if (err) {
365 goto bail0;
366 }
367
368 reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
369 if (!reply) {
370 err = -ENOMEM;
371 goto bail0;
372 }
373
374 if ((err = c2_errno(reply)) != 0) {
375 goto bail1;
376 }
377
378 c2dev->adapter_handle = reply->rnic_handle;
379
380 bail1:
381 vq_repbuf_free(c2dev, reply);
382 bail0:
383 vq_req_free(c2dev, vq_req);
384 return err;
385}
386
387/*
388 * Close the RNIC instance
389 */
390static int c2_rnic_close(struct c2_dev *c2dev)
391{
392 struct c2_vq_req *vq_req;
393 union c2wr wr;
394 struct c2wr_rnic_close_rep *reply;
395 int err;
396
397 vq_req = vq_req_alloc(c2dev);
398 if (vq_req == NULL) {
399 return -ENOMEM;
400 }
401
402 memset(&wr, 0, sizeof(wr));
403 c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
404 wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
405 wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
406
407 vq_req_get(c2dev, vq_req);
408
409 err = vq_send_wr(c2dev, &wr);
410 if (err) {
411 vq_req_put(c2dev, vq_req);
412 goto bail0;
413 }
414
415 err = vq_wait_for_reply(c2dev, vq_req);
416 if (err) {
417 goto bail0;
418 }
419
420 reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
421 if (!reply) {
422 err = -ENOMEM;
423 goto bail0;
424 }
425
426 if ((err = c2_errno(reply)) != 0) {
427 goto bail1;
428 }
429
430 c2dev->adapter_handle = 0;
431
432 bail1:
433 vq_repbuf_free(c2dev, reply);
434 bail0:
435 vq_req_free(c2dev, vq_req);
436 return err;
437}
438
439/*
440 * Called by c2_probe to initialize the RNIC. This principally
441 * involves initializing the various limits and resource pools that
442 * comprise the RNIC instance.
443 */
444int c2_rnic_init(struct c2_dev *c2dev)
445{
446 int err;
447 u32 qsize, msgsize;
448 void *q1_pages;
449 void *q2_pages;
450 void __iomem *mmio_regs;
451
452 /* Device capabilities */
453 c2dev->device_cap_flags =
454 (IB_DEVICE_RESIZE_MAX_WR |
455 IB_DEVICE_CURR_QP_STATE_MOD |
456 IB_DEVICE_SYS_IMAGE_GUID |
457 IB_DEVICE_ZERO_STAG |
458 IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
459
460 /* Allocate the qptr_array */
461 c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
462 if (!c2dev->qptr_array) {
463 return -ENOMEM;
464 }
465
466	/* Initialize the qptr_array */
467 memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
468 c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
469 c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
470 c2dev->qptr_array[2] = (void *) &c2dev->aeq;
471
472 /* Initialize data structures */
473 init_waitqueue_head(&c2dev->req_vq_wo);
474 spin_lock_init(&c2dev->vqlock);
475 spin_lock_init(&c2dev->lock);
476
477 /* Allocate MQ shared pointer pool for kernel clients. User
478 * mode client pools are hung off the user context
479 */
480 err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
481 if (err) {
482 goto bail0;
483 }
484
485 /* Allocate shared pointers for Q0, Q1, and Q2 from
486 * the shared pointer pool.
487 */
488
489 c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
490 &c2dev->hint_count_dma,
491 GFP_KERNEL);
492 c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
493 &c2dev->req_vq.shared_dma,
494 GFP_KERNEL);
495 c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
496 &c2dev->rep_vq.shared_dma,
497 GFP_KERNEL);
498 c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
499 &c2dev->aeq.shared_dma, GFP_KERNEL);
500 if (!c2dev->hint_count || !c2dev->req_vq.shared ||
501 !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
502 err = -ENOMEM;
503 goto bail1;
504 }
505
506 mmio_regs = c2dev->kva;
507 /* Initialize the Verbs Request Queue */
508 c2_mq_req_init(&c2dev->req_vq, 0,
509 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
510 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
511 mmio_regs +
512 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
513 mmio_regs +
514 be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
515 C2_MQ_ADAPTER_TARGET);
516
517 /* Initialize the Verbs Reply Queue */
518 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
519 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
520 q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
521 if (!q1_pages) {
522 err = -ENOMEM;
523 goto bail1;
524 }
525 c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
526 (void *)q1_pages, qsize * msgsize,
527 DMA_FROM_DEVICE);
528 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
529 pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
530 (u64)c2dev->rep_vq.host_dma);
531 c2_mq_rep_init(&c2dev->rep_vq,
532 1,
533 qsize,
534 msgsize,
535 q1_pages,
536 mmio_regs +
537 be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
538 C2_MQ_HOST_TARGET);
539
540	/* Initialize the Asynchronous Event Queue */
541 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
542 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
543 q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
544 if (!q2_pages) {
545 err = -ENOMEM;
546 goto bail2;
547 }
548 c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
549 (void *)q2_pages, qsize * msgsize,
550 DMA_FROM_DEVICE);
551 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
552	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
553		 (u64)c2dev->aeq.host_dma);
554 c2_mq_rep_init(&c2dev->aeq,
555 2,
556 qsize,
557 msgsize,
558 q2_pages,
559 mmio_regs +
560 be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
561 C2_MQ_HOST_TARGET);
562
563 /* Initialize the verbs request allocator */
564 err = vq_init(c2dev);
565 if (err)
566 goto bail3;
567
568 /* Enable interrupts on the adapter */
569 writel(0, c2dev->regs + C2_IDIS);
570
571 /* create the WR init message */
572 err = c2_adapter_init(c2dev);
573 if (err)
574 goto bail4;
575 c2dev->init++;
576
577 /* open an adapter instance */
578 err = c2_rnic_open(c2dev);
579 if (err)
580 goto bail4;
581
582	/* Initialize the cached adapter limits */
583	err = c2_rnic_query(c2dev, &c2dev->props);
584	if (err)
585		goto bail5;
586 /* Initialize the PD pool */
587 err = c2_init_pd_table(c2dev);
588 if (err)
589 goto bail5;
590
591 /* Initialize the QP pool */
592 c2_init_qp_table(c2dev);
593 return 0;
594
595 bail5:
596 c2_rnic_close(c2dev);
597 bail4:
598 vq_term(c2dev);
599 bail3:
600 dma_unmap_single(c2dev->ibdev.dma_device,
601 pci_unmap_addr(&c2dev->aeq, mapping),
602 c2dev->aeq.q_size * c2dev->aeq.msg_size,
603 DMA_FROM_DEVICE);
604 kfree(q2_pages);
605 bail2:
606 dma_unmap_single(c2dev->ibdev.dma_device,
607 pci_unmap_addr(&c2dev->rep_vq, mapping),
608 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
609 DMA_FROM_DEVICE);
610 kfree(q1_pages);
611 bail1:
612 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
613 bail0:
614 vfree(c2dev->qptr_array);
615
616 return err;
617}
618
619/*
620 * Called by c2_remove to cleanup the RNIC resources.
621 */
622void c2_rnic_term(struct c2_dev *c2dev)
623{
624
625 /* Close the open adapter instance */
626 c2_rnic_close(c2dev);
627
628 /* Send the TERM message to the adapter */
629 c2_adapter_term(c2dev);
630
631 /* Disable interrupts on the adapter */
632 writel(1, c2dev->regs + C2_IDIS);
633
634 /* Free the QP pool */
635 c2_cleanup_qp_table(c2dev);
636
637 /* Free the PD pool */
638 c2_cleanup_pd_table(c2dev);
639
640 /* Free the verbs request allocator */
641 vq_term(c2dev);
642
643	/* Unmap and free the asynchronous event queue */
644 dma_unmap_single(c2dev->ibdev.dma_device,
645 pci_unmap_addr(&c2dev->aeq, mapping),
646 c2dev->aeq.q_size * c2dev->aeq.msg_size,
647 DMA_FROM_DEVICE);
648 kfree(c2dev->aeq.msg_pool.host);
649
650 /* Unmap and free the verbs reply queue */
651 dma_unmap_single(c2dev->ibdev.dma_device,
652 pci_unmap_addr(&c2dev->rep_vq, mapping),
653 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
654 DMA_FROM_DEVICE);
655 kfree(c2dev->rep_vq.msg_pool.host);
656
657 /* Free the MQ shared pointer pool */
658 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
659
660 /* Free the qptr_array */
661 vfree(c2dev->qptr_array);
662
663 return;
664}
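
On the fw_ver value assembled in c2_rnic_query() above: the adapter's major, minor and patch numbers are packed into the 64-bit props->fw_ver as (major << 32) | (minor << 16) | patch, which is why the masks there must be the bitwise '&' rather than the logical '&&'. A small sketch of unpacking the field, for illustration only:

	u64 fw_ver = props->fw_ver;
	u32 major  = fw_ver >> 32;
	u32 minor  = (fw_ver >> 16) & 0xFFFF;
	u32 patch  = fw_ver & 0xFFFF;

	pr_debug("amso1100 firmware %u.%u.%u\n", major, minor, patch);
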
diff --git a/drivers/infiniband/hw/amso1100/c2_status.h b/drivers/infiniband/hw/amso1100/c2_status.h
new file mode 100644
index 000000000000..6ee4aa92d875
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_status.h
@@ -0,0 +1,158 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_STATUS_H_
34#define _C2_STATUS_H_
35
36/*
37 * Verbs Status Codes
38 */
39enum c2_status {
40 C2_OK = 0, /* This must be zero */
41 CCERR_INSUFFICIENT_RESOURCES = 1,
42 CCERR_INVALID_MODIFIER = 2,
43 CCERR_INVALID_MODE = 3,
44 CCERR_IN_USE = 4,
45 CCERR_INVALID_RNIC = 5,
46 CCERR_INTERRUPTED_OPERATION = 6,
47 CCERR_INVALID_EH = 7,
48 CCERR_INVALID_CQ = 8,
49 CCERR_CQ_EMPTY = 9,
50 CCERR_NOT_IMPLEMENTED = 10,
51 CCERR_CQ_DEPTH_TOO_SMALL = 11,
52 CCERR_PD_IN_USE = 12,
53 CCERR_INVALID_PD = 13,
54 CCERR_INVALID_SRQ = 14,
55 CCERR_INVALID_ADDRESS = 15,
56 CCERR_INVALID_NETMASK = 16,
57 CCERR_INVALID_QP = 17,
58 CCERR_INVALID_QP_STATE = 18,
59 CCERR_TOO_MANY_WRS_POSTED = 19,
60 CCERR_INVALID_WR_TYPE = 20,
61 CCERR_INVALID_SGL_LENGTH = 21,
62 CCERR_INVALID_SQ_DEPTH = 22,
63 CCERR_INVALID_RQ_DEPTH = 23,
64 CCERR_INVALID_ORD = 24,
65 CCERR_INVALID_IRD = 25,
66 CCERR_QP_ATTR_CANNOT_CHANGE = 26,
67 CCERR_INVALID_STAG = 27,
68 CCERR_QP_IN_USE = 28,
69 CCERR_OUTSTANDING_WRS = 29,
70 CCERR_STAG_IN_USE = 30,
71 CCERR_INVALID_STAG_INDEX = 31,
72 CCERR_INVALID_SGL_FORMAT = 32,
73 CCERR_ADAPTER_TIMEOUT = 33,
74 CCERR_INVALID_CQ_DEPTH = 34,
75 CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
76 CCERR_INVALID_EP = 36,
77 CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
78 CCERR_FLUSHED = 38,
79 CCERR_INVALID_WQE = 39,
80 CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
81 CCERR_REMOTE_TERMINATION_ERROR = 41,
82 CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
83 CCERR_ACCESS_VIOLATION = 43,
84 CCERR_INVALID_PD_ID = 44,
85 CCERR_WRAP_ERROR = 45,
86 CCERR_INV_STAG_ACCESS_ERROR = 46,
87 CCERR_ZERO_RDMA_READ_RESOURCES = 47,
88 CCERR_QP_NOT_PRIVILEGED = 48,
89 CCERR_STAG_STATE_NOT_INVALID = 49,
90 CCERR_INVALID_PAGE_SIZE = 50,
91 CCERR_INVALID_BUFFER_SIZE = 51,
92 CCERR_INVALID_PBE = 52,
93 CCERR_INVALID_FBO = 53,
94 CCERR_INVALID_LENGTH = 54,
95 CCERR_INVALID_ACCESS_RIGHTS = 55,
96 CCERR_PBL_TOO_BIG = 56,
97 CCERR_INVALID_VA = 57,
98 CCERR_INVALID_REGION = 58,
99 CCERR_INVALID_WINDOW = 59,
100 CCERR_TOTAL_LENGTH_TOO_BIG = 60,
101 CCERR_INVALID_QP_ID = 61,
102 CCERR_ADDR_IN_USE = 62,
103 CCERR_ADDR_NOT_AVAIL = 63,
104 CCERR_NET_DOWN = 64,
105 CCERR_NET_UNREACHABLE = 65,
106 CCERR_CONN_ABORTED = 66,
107 CCERR_CONN_RESET = 67,
108 CCERR_NO_BUFS = 68,
109 CCERR_CONN_TIMEDOUT = 69,
110 CCERR_CONN_REFUSED = 70,
111 CCERR_HOST_UNREACHABLE = 71,
112 CCERR_INVALID_SEND_SGL_DEPTH = 72,
113 CCERR_INVALID_RECV_SGL_DEPTH = 73,
114 CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
115 CCERR_INSUFFICIENT_PRIVILEGES = 75,
116 CCERR_STACK_ERROR = 76,
117 CCERR_INVALID_VERSION = 77,
118 CCERR_INVALID_MTU = 78,
119 CCERR_INVALID_IMAGE = 79,
120	CCERR_PENDING = 98,	/* not an error; used internally by adapter */
121 CCERR_DEFER = 99, /* not an error; used internally by adapter */
122 CCERR_FAILED_WRITE = 100,
123 CCERR_FAILED_ERASE = 101,
124 CCERR_FAILED_VERIFICATION = 102,
125 CCERR_NOT_FOUND = 103,
126
127};
128
129/*
130 * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
131 */
132enum c2_connect_status {
133 C2_CONN_STATUS_SUCCESS = C2_OK,
134 C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
135 C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
136 C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
137 C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
138 C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
139 C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
140 C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
141 C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
142 C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
143 C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
144};
145
146/*
147 * Flash programming status codes.
148 */
149enum c2_flash_status {
150 C2_FLASH_STATUS_SUCCESS = 0x0000,
151 C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
152 C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
153 C2_FLASH_STATUS_ECLBS = 0x0400,
154 C2_FLASH_STATUS_PSLBS = 0x0800,
155 C2_FLASH_STATUS_VPENS = 0x1000,
156};
157
158#endif /* _C2_STATUS_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_user.h b/drivers/infiniband/hw/amso1100/c2_user.h
new file mode 100644
index 000000000000..7e9e7ad65467
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_user.h
@@ -0,0 +1,82 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 */
35
36#ifndef C2_USER_H
37#define C2_USER_H
38
39#include <linux/types.h>
40
41/*
42 * Make sure that all structs defined in this file remain laid out so
43 * that they pack the same way on 32-bit and 64-bit architectures (to
44 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
45 * In particular do not use pointer types -- pass pointers in __u64
46 * instead.
47 */
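The convention above can be illustrated with a small, hypothetical example: a userspace pointer travels through a __u64 field (cast via unsigned long), so the struct layout is identical for 32-bit and 64-bit userspace. The struct and field names below are illustrative only and are not part of this ABI.

/* Hypothetical command struct following the convention described above. */
struct example_cmd {
	__u64 buf_addr;		/* userspace buffer address, passed as __u64 */
	__u32 buf_len;
	__u32 reserved;		/* keep the size a multiple of 8 bytes */
};

/* Illustrative userspace usage:
 *	struct example_cmd cmd;
 *	cmd.buf_addr = (__u64) (unsigned long) buf;
 *	cmd.buf_len  = len;
 *	cmd.reserved = 0;
 */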
48
49struct c2_alloc_ucontext_resp {
50 __u32 qp_tab_size;
51 __u32 uarc_size;
52};
53
54struct c2_alloc_pd_resp {
55 __u32 pdn;
56 __u32 reserved;
57};
58
59struct c2_create_cq {
60 __u32 lkey;
61 __u32 pdn;
62 __u64 arm_db_page;
63 __u64 set_db_page;
64 __u32 arm_db_index;
65 __u32 set_db_index;
66};
67
68struct c2_create_cq_resp {
69 __u32 cqn;
70 __u32 reserved;
71};
72
73struct c2_create_qp {
74 __u32 lkey;
75 __u32 reserved;
76 __u64 sq_db_page;
77 __u64 rq_db_page;
78 __u32 sq_db_index;
79 __u32 rq_db_index;
80};
81
82#endif /* C2_USER_H */
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
new file mode 100644
index 000000000000..40caeb5f41b4
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -0,0 +1,260 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/slab.h>
34#include <linux/spinlock.h>
35
36#include "c2_vq.h"
37#include "c2_provider.h"
38
39/*
40 * Verbs Request Objects:
41 *
42 * VQ Request Objects are allocated by the kernel verbs handlers.
43 * They contain a wait object, a refcnt, an atomic bool indicating that the
44 * adapter has replied, and a copy of the verb reply work request.
45 * A pointer to the VQ Request Object is passed down in the context
46 * field of the work request message, and reflected back by the adapter
47 * in the verbs reply message. The function handle_vq() in the interrupt
48 * path will use this pointer to:
49 * 1) append a copy of the verbs reply message
50 * 2) mark that the reply is ready
51 * 3) wake up the kernel verbs handler blocked awaiting the reply.
52 *
53 *
54 * The kernel verbs handlers do a "get" to put a 2nd reference on the
55 * VQ Request object. If the kernel verbs handler exits before the adapter
56 * can respond, this extra reference will keep the VQ Request object around
57 * until the adapter's reply can be processed. The reason we need this is
58 * because a pointer to this object is stuffed into the context field of
59 * the verbs work request message, and reflected back in the reply message.
60 * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
61 * kernel verb handler that is blocked awaiting the verb reply.
62 * So handle_vq() will do a "put" on the object when it's done accessing it.
63 * NOTE: If we guarantee that the kernel verb handler will never bail before
64 * getting the reply, then we don't need these refcnts.
65 *
66 *
67 * VQ Request objects are freed by the kernel verbs handlers only
68 * after the verb has been processed, or when the adapter fails and
69 * does not reply.
70 *
71 *
72 * Verbs Reply Buffers:
73 *
74 * VQ Reply bufs are local host memory copies of an
75 * outstanding Verb Request reply
76 * message. They are always allocated by the kernel verbs handlers, and _may_ be
77 * freed by either the kernel verbs handler -or- the interrupt handler. The
78 * kernel verbs handler _must_ free the repbuf, then free the vq request object
79 * in that order.
80 */
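As a rough illustration of the lifecycle described above, the sketch below shows the order in which a kernel verbs handler typically calls these helpers. It is a simplified, hypothetical handler: the work-request setup and most error handling are elided, and it assumes the driver context (struct c2_dev, union c2wr) declared elsewhere in this driver.

/* Hypothetical verbs handler showing the intended vq_* call order. */
static int example_verbs_call(struct c2_dev *c2dev, union c2wr *wr)
{
	struct c2_vq_req *vq_req;
	void *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/* 2nd reference keeps the object alive if we bail before the reply. */
	vq_req_get(c2dev, vq_req);

	wr->hdr.context = (unsigned long) vq_req;	/* reflected back by the adapter */
	err = vq_send_wr(c2dev, wr);
	if (err) {
		vq_req_put(c2dev, vq_req);	/* adapter never saw it; drop that ref */
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;	/* handle_vq() will put the extra ref later */

	/* handle_vq() stashed the reply buffer pointer in reply_msg. */
	reply = (void *) (unsigned long) vq_req->reply_msg;
	/* ... check the result code, copy out reply fields ... */
	vq_repbuf_free(c2dev, reply);
out:
	vq_req_free(c2dev, vq_req);	/* repbuf is freed before the request */
	return err;
}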
81
82int vq_init(struct c2_dev *c2dev)
83{
84 sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
85 (char) ('0' + c2dev->devnum));
86 c2dev->host_msg_cache =
87 kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
88 SLAB_HWCACHE_ALIGN, NULL, NULL);
89 if (c2dev->host_msg_cache == NULL) {
90 return -ENOMEM;
91 }
92 return 0;
93}
94
95void vq_term(struct c2_dev *c2dev)
96{
97 kmem_cache_destroy(c2dev->host_msg_cache);
98}
99
100/* vq_req_alloc - allocate a VQ Request Object and initialize it.
101 * The refcnt is set to 1.
102 */
103struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
104{
105 struct c2_vq_req *r;
106
107 r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
108 if (r) {
109 init_waitqueue_head(&r->wait_object);
110 r->reply_msg = (u64) NULL;
111 r->event = 0;
112 r->cm_id = NULL;
113 r->qp = NULL;
114 atomic_set(&r->refcnt, 1);
115 atomic_set(&r->reply_ready, 0);
116 }
117 return r;
118}
119
120
121/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler
122 * has already freed the VQ Reply Buffer if it existed.
123 */
124void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
125{
126 r->reply_msg = (u64) NULL;
127 if (atomic_dec_and_test(&r->refcnt)) {
128 kfree(r);
129 }
130}
131
132/* vq_req_get - reference a VQ Request Object. Done
133 * only in the kernel verbs handlers.
134 */
135void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
136{
137 atomic_inc(&r->refcnt);
138}
139
140
141/* vq_req_put - dereference and potentially free a VQ Request Object.
142 *
143 * This is only called by handle_vq() on the
144 * interrupt when it is done processing
145 * a verb reply message. If the associated
146 * kernel verbs handler has already bailed,
147 * then this put will actually free the VQ
148 * Request object _and_ the VQ Reply Buffer
149 * if it exists.
150 */
151void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
152{
153 if (atomic_dec_and_test(&r->refcnt)) {
154 if (r->reply_msg != (u64) NULL)
155 vq_repbuf_free(c2dev,
156 (void *) (unsigned long) r->reply_msg);
157 kfree(r);
158 }
159}
160
161
162/*
163 * vq_repbuf_alloc - allocate a VQ Reply Buffer.
164 */
165void *vq_repbuf_alloc(struct c2_dev *c2dev)
166{
167 return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC);
168}
169
170/*
171 * vq_send_wr - post a verbs request message to the Verbs Request Queue.
172 * If a message is not available in the MQ, then block until one is available.
173 * NOTE: handle_mq() in interrupt context will wake up threads blocked here.
174 * When the adapter drains the Verbs Request Queue,
175 * it inserts MQ index 0 into the
176 * adapter->host activity fifo and interrupts the host.
177 */
178int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
179{
180 void *msg;
181 wait_queue_t __wait;
182
183 /*
184 * grab adapter vq lock
185 */
186 spin_lock(&c2dev->vqlock);
187
188 /*
189 * allocate msg
190 */
191 msg = c2_mq_alloc(&c2dev->req_vq);
192
193 /*
194 * If we cannot get a msg, then we'll wait.
195 * When messages are available, the int handler will wake_up()
196 * any waiters.
197 */
198 while (msg == NULL) {
199 pr_debug("%s:%d no available msg in VQ, waiting...\n",
200 __FUNCTION__, __LINE__);
201 init_waitqueue_entry(&__wait, current);
202 add_wait_queue(&c2dev->req_vq_wo, &__wait);
203 spin_unlock(&c2dev->vqlock);
204 for (;;) {
205 set_current_state(TASK_INTERRUPTIBLE);
206 if (!c2_mq_full(&c2dev->req_vq)) {
207 break;
208 }
209 if (!signal_pending(current)) {
210 schedule_timeout(1 * HZ); /* 1 second... */
211 continue;
212 }
213 set_current_state(TASK_RUNNING);
214 remove_wait_queue(&c2dev->req_vq_wo, &__wait);
215 return -EINTR;
216 }
217 set_current_state(TASK_RUNNING);
218 remove_wait_queue(&c2dev->req_vq_wo, &__wait);
219 spin_lock(&c2dev->vqlock);
220 msg = c2_mq_alloc(&c2dev->req_vq);
221 }
222
223 /*
224 * copy wr into adapter msg
225 */
226 memcpy(msg, wr, c2dev->req_vq.msg_size);
227
228 /*
229 * post msg
230 */
231 c2_mq_produce(&c2dev->req_vq);
232
233 /*
234 * release adapter vq lock
235 */
236 spin_unlock(&c2dev->vqlock);
237 return 0;
238}
239
240
241/*
242 * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
243 */
244int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
245{
246 if (!wait_event_timeout(req->wait_object,
247 atomic_read(&req->reply_ready),
248 60*HZ))
249 return -ETIMEDOUT;
250
251 return 0;
252}
253
254/*
255 * vq_repbuf_free - Free a Verbs Reply Buffer.
256 */
257void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
258{
259 kmem_cache_free(c2dev->host_msg_cache, reply);
260}
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.h b/drivers/infiniband/hw/amso1100/c2_vq.h
new file mode 100644
index 000000000000..33805627a607
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_vq.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_VQ_H_
34#define _C2_VQ_H_
35#include <linux/sched.h>
36#include "c2.h"
37#include "c2_wr.h"
38#include "c2_provider.h"
39
40struct c2_vq_req {
41 u64 reply_msg; /* ptr to reply msg */
42 wait_queue_head_t wait_object; /* wait object for vq reqs */
43 atomic_t reply_ready; /* set when reply is ready */
44 atomic_t refcnt; /* used to cancel WRs... */
45 int event;
46 struct iw_cm_id *cm_id;
47 struct c2_qp *qp;
48};
49
50extern int vq_init(struct c2_dev *c2dev);
51extern void vq_term(struct c2_dev *c2dev);
52
53extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
54extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
55extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
56extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
57extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
58
59extern void *vq_repbuf_alloc(struct c2_dev *c2dev);
60extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
61
62extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
63#endif /* _C2_VQ_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h
new file mode 100644
index 000000000000..3ec6c43bb0ef
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_wr.h
@@ -0,0 +1,1520 @@
1/*
2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _C2_WR_H_
34#define _C2_WR_H_
35
36#ifdef CCDEBUG
37#define CCWR_MAGIC 0xb07700b0
38#endif
39
40#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
41
42/* Maximum allowed size in bytes of private_data exchange
43 * on connect.
44 */
45#define C2_MAX_PRIVATE_DATA_SIZE 200
46
47/*
48 * These types are shared among the adapter, host, and CCIL consumer.
49 */
50enum c2_cq_notification_type {
51 C2_CQ_NOTIFICATION_TYPE_NONE = 1,
52 C2_CQ_NOTIFICATION_TYPE_NEXT,
53 C2_CQ_NOTIFICATION_TYPE_NEXT_SE
54};
55
56enum c2_setconfig_cmd {
57 C2_CFG_ADD_ADDR = 1,
58 C2_CFG_DEL_ADDR = 2,
59 C2_CFG_ADD_ROUTE = 3,
60 C2_CFG_DEL_ROUTE = 4
61};
62
63enum c2_getconfig_cmd {
64 C2_GETCONFIG_ROUTES = 1,
65 C2_GETCONFIG_ADDRS
66};
67
68/*
69 * CCIL Work Request Identifiers
70 */
71enum c2wr_ids {
72 CCWR_RNIC_OPEN = 1,
73 CCWR_RNIC_QUERY,
74 CCWR_RNIC_SETCONFIG,
75 CCWR_RNIC_GETCONFIG,
76 CCWR_RNIC_CLOSE,
77 CCWR_CQ_CREATE,
78 CCWR_CQ_QUERY,
79 CCWR_CQ_MODIFY,
80 CCWR_CQ_DESTROY,
81 CCWR_QP_CONNECT,
82 CCWR_PD_ALLOC,
83 CCWR_PD_DEALLOC,
84 CCWR_SRQ_CREATE,
85 CCWR_SRQ_QUERY,
86 CCWR_SRQ_MODIFY,
87 CCWR_SRQ_DESTROY,
88 CCWR_QP_CREATE,
89 CCWR_QP_QUERY,
90 CCWR_QP_MODIFY,
91 CCWR_QP_DESTROY,
92 CCWR_NSMR_STAG_ALLOC,
93 CCWR_NSMR_REGISTER,
94 CCWR_NSMR_PBL,
95 CCWR_STAG_DEALLOC,
96 CCWR_NSMR_REREGISTER,
97 CCWR_SMR_REGISTER,
98 CCWR_MR_QUERY,
99 CCWR_MW_ALLOC,
100 CCWR_MW_QUERY,
101 CCWR_EP_CREATE,
102 CCWR_EP_GETOPT,
103 CCWR_EP_SETOPT,
104 CCWR_EP_DESTROY,
105 CCWR_EP_BIND,
106 CCWR_EP_CONNECT,
107 CCWR_EP_LISTEN,
108 CCWR_EP_SHUTDOWN,
109 CCWR_EP_LISTEN_CREATE,
110 CCWR_EP_LISTEN_DESTROY,
111 CCWR_EP_QUERY,
112 CCWR_CR_ACCEPT,
113 CCWR_CR_REJECT,
114 CCWR_CONSOLE,
115 CCWR_TERM,
116 CCWR_FLASH_INIT,
117 CCWR_FLASH,
118 CCWR_BUF_ALLOC,
119 CCWR_BUF_FREE,
120 CCWR_FLASH_WRITE,
121 CCWR_INIT, /* WARNING: Don't move this ever again! */
122
123
124
125 /* Add new IDs here */
126
127
128
129 /*
130 * WARNING: CCWR_LAST must always be the last verbs id defined!
131 * All the preceding IDs are fixed, and must not change.
132 * You can add new IDs, but must not remove or reorder
133 * any IDs. If you do, YOU will ruin any hope of
134 * compatibility between versions.
135 */
136 CCWR_LAST,
137
138 /*
139 * Start over at 1 so that arrays indexed by user wr id's
140 * begin at 1. This is OK since the verbs and user wr id's
141 * are always used on disjoint sets of queues.
142 */
143 /*
144 * The order of the CCWR_SEND_XX verbs must
145 * match the order of the RDMA_OPs
146 */
147 CCWR_SEND = 1,
148 CCWR_SEND_INV,
149 CCWR_SEND_SE,
150 CCWR_SEND_SE_INV,
151 CCWR_RDMA_WRITE,
152 CCWR_RDMA_READ,
153 CCWR_RDMA_READ_INV,
154 CCWR_MW_BIND,
155 CCWR_NSMR_FASTREG,
156 CCWR_STAG_INVALIDATE,
157 CCWR_RECV,
158 CCWR_NOP,
159 CCWR_UNIMPL,
160/* WARNING: This must always be the last user wr id defined! */
161};
162#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
163
164/*
165 * SQ/RQ Work Request Types
166 */
167enum c2_wr_type {
168 C2_WR_TYPE_SEND = CCWR_SEND,
169 C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
170 C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
171 C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
172 C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
173 C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
174 C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
175 C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
176 C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
177 C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
178 C2_WR_TYPE_RECV = CCWR_RECV,
179 C2_WR_TYPE_NOP = CCWR_NOP,
180};
181
182struct c2_netaddr {
183 u32 ip_addr;
184 u32 netmask;
185 u32 mtu;
186};
187
188struct c2_route {
189 u32 ip_addr; /* 0 indicates the default route */
190 u32 netmask; /* netmask associated with dst */
191 u32 flags;
192 union {
193 u32 ipaddr; /* address of the nexthop interface */
194 u8 enaddr[6];
195 } nexthop;
196};
197
198/*
199 * A Scatter Gather Entry.
200 */
201struct c2_data_addr {
202 u32 stag;
203 u32 length;
204 u64 to;
205};
206
207/*
208 * MR and MW flags used by the consumer, RI, and RNIC.
209 */
210enum c2_mm_flags {
211 MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */
212 MEM_VA_BASED = 0x0002, /* Not Zero-based */
213 MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */
214 MEM_LOCAL_READ = 0x0008, /* allow local reads */
215 MEM_LOCAL_WRITE = 0x0010, /* allow local writes */
216 MEM_REMOTE_READ = 0x0020, /* allow remote reads */
217 MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */
218 MEM_WINDOW_BIND = 0x0080, /* binds allowed */
219 MEM_SHARED = 0x0100, /* set if MR is shared */
220 MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
221};
222
223/*
224 * CCIL API ACF flags defined in terms of the low level mem flags.
225 * This minimizes translation needed in the user API
226 */
227enum c2_acf {
228 C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
229 C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
230 C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
231 C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
232 C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
233};
234
235/*
236 * Image types of objects written to flash
237 */
238#define C2_FLASH_IMG_BITFILE 1
239#define C2_FLASH_IMG_OPTION_ROM 2
240#define C2_FLASH_IMG_VPD 3
241
242/*
243 * To fix bug 1815 we define the maximum allowable size of the
244 * terminate message (per the IETF spec; refer to the IETF
245 * protocol specification, section 12.1.6, page 64).
246 * The message is prefixed by 20 bytes of DDP info.
247 *
248 * Then the message has 6 bytes for the terminate control
249 * and DDP segment length info plus a DDP header (either
250 * 14 or 18 bytes) plus 28 bytes for the RDMA header.
251 * Thus the max size is:
252 * 20 + (6 + 18 + 28) = 72
253 */
254#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
255
256/*
257 * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h
258 */
259#define WR_BUILD_STR_LEN 64
260
261/*
262 * WARNING: All of these structs need to align any 64bit types on
263 * 64 bit boundaries! 64bit types include u64.
264 */
265
266/*
267 * Clustercore Work Request Header. Be sensitive to field layout
268 * and alignment.
269 */
270struct c2wr_hdr {
271 /* wqe_count is part of the cqe. It is put here so the
272 * adapter can write to it while the wr is pending without
273 * clobbering part of the wr. This word need not be dma'd
274 * from the host to adapter by libccil, but we copy it anyway
275 * to make the memcpy to the adapter better aligned.
276 */
277 u32 wqe_count;
278
279 /* Put these fields next so that later 32- and 64-bit
280 * quantities are naturally aligned.
281 */
282 u8 id;
283 u8 result; /* adapter -> host */
284 u8 sge_count; /* host -> adapter */
285 u8 flags; /* host -> adapter */
286
287 u64 context;
288#ifdef CCMSGMAGIC
289 u32 magic;
290 u32 pad;
291#endif
292} __attribute__((packed));
293
294/*
295 *------------------------ RNIC ------------------------
296 */
297
298/*
299 * WR_RNIC_OPEN
300 */
301
302/*
303 * Flags for the RNIC WRs
304 */
305enum c2_rnic_flags {
306 RNIC_IRD_STATIC = 0x0001,
307 RNIC_ORD_STATIC = 0x0002,
308 RNIC_QP_STATIC = 0x0004,
309 RNIC_SRQ_SUPPORTED = 0x0008,
310 RNIC_PBL_BLOCK_MODE = 0x0010,
311 RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
312 RNIC_CQ_OVF_DETECTED = 0x0040,
313 RNIC_PRIV_MODE = 0x0080
314};
315
316struct c2wr_rnic_open_req {
317 struct c2wr_hdr hdr;
318 u64 user_context;
319 u16 flags; /* See enum c2_rnic_flags */
320 u16 port_num;
321} __attribute__((packed));
322
323struct c2wr_rnic_open_rep {
324 struct c2wr_hdr hdr;
325 u32 rnic_handle;
326} __attribute__((packed));
327
328union c2wr_rnic_open {
329 struct c2wr_rnic_open_req req;
330 struct c2wr_rnic_open_rep rep;
331} __attribute__((packed));
332
333struct c2wr_rnic_query_req {
334 struct c2wr_hdr hdr;
335 u32 rnic_handle;
336} __attribute__((packed));
337
338/*
339 * WR_RNIC_QUERY
340 */
341struct c2wr_rnic_query_rep {
342 struct c2wr_hdr hdr;
343 u64 user_context;
344 u32 vendor_id;
345 u32 part_number;
346 u32 hw_version;
347 u32 fw_ver_major;
348 u32 fw_ver_minor;
349 u32 fw_ver_patch;
350 char fw_ver_build_str[WR_BUILD_STR_LEN];
351 u32 max_qps;
352 u32 max_qp_depth;
353 u32 max_srq_depth;
354 u32 max_send_sgl_depth;
355 u32 max_rdma_sgl_depth;
356 u32 max_cqs;
357 u32 max_cq_depth;
358 u32 max_cq_event_handlers;
359 u32 max_mrs;
360 u32 max_pbl_depth;
361 u32 max_pds;
362 u32 max_global_ird;
363 u32 max_global_ord;
364 u32 max_qp_ird;
365 u32 max_qp_ord;
366 u32 flags;
367 u32 max_mws;
368 u32 pbe_range_low;
369 u32 pbe_range_high;
370 u32 max_srqs;
371 u32 page_size;
372} __attribute__((packed));
373
374union c2wr_rnic_query {
375 struct c2wr_rnic_query_req req;
376 struct c2wr_rnic_query_rep rep;
377} __attribute__((packed));
378
379/*
380 * WR_RNIC_GETCONFIG
381 */
382
383struct c2wr_rnic_getconfig_req {
384 struct c2wr_hdr hdr;
385 u32 rnic_handle;
386 u32 option; /* see c2_getconfig_cmd_t */
387 u64 reply_buf;
388 u32 reply_buf_len;
389} __attribute__((packed)) ;
390
391struct c2wr_rnic_getconfig_rep {
392 struct c2wr_hdr hdr;
393 u32 option; /* see c2_getconfig_cmd_t */
394 u32 count_len; /* length of the number of addresses configured */
395} __attribute__((packed)) ;
396
397union c2wr_rnic_getconfig {
398 struct c2wr_rnic_getconfig_req req;
399 struct c2wr_rnic_getconfig_rep rep;
400} __attribute__((packed)) ;
401
402/*
403 * WR_RNIC_SETCONFIG
404 */
405struct c2wr_rnic_setconfig_req {
406 struct c2wr_hdr hdr;
407 u32 rnic_handle;
408 u32 option; /* See c2_setconfig_cmd_t */
409 /* variable data and pad. See c2_netaddr and c2_route */
410 u8 data[0];
411} __attribute__((packed)) ;
412
413struct c2wr_rnic_setconfig_rep {
414 struct c2wr_hdr hdr;
415} __attribute__((packed)) ;
416
417union c2wr_rnic_setconfig {
418 struct c2wr_rnic_setconfig_req req;
419 struct c2wr_rnic_setconfig_rep rep;
420} __attribute__((packed)) ;
421
422/*
423 * WR_RNIC_CLOSE
424 */
425struct c2wr_rnic_close_req {
426 struct c2wr_hdr hdr;
427 u32 rnic_handle;
428} __attribute__((packed)) ;
429
430struct c2wr_rnic_close_rep {
431 struct c2wr_hdr hdr;
432} __attribute__((packed)) ;
433
434union c2wr_rnic_close {
435 struct c2wr_rnic_close_req req;
436 struct c2wr_rnic_close_rep rep;
437} __attribute__((packed)) ;
438
439/*
440 *------------------------ CQ ------------------------
441 */
442struct c2wr_cq_create_req {
443 struct c2wr_hdr hdr;
444 u64 shared_ht;
445 u64 user_context;
446 u64 msg_pool;
447 u32 rnic_handle;
448 u32 msg_size;
449 u32 depth;
450} __attribute__((packed)) ;
451
452struct c2wr_cq_create_rep {
453 struct c2wr_hdr hdr;
454 u32 mq_index;
455 u32 adapter_shared;
456 u32 cq_handle;
457} __attribute__((packed)) ;
458
459union c2wr_cq_create {
460 struct c2wr_cq_create_req req;
461 struct c2wr_cq_create_rep rep;
462} __attribute__((packed)) ;
463
464struct c2wr_cq_modify_req {
465 struct c2wr_hdr hdr;
466 u32 rnic_handle;
467 u32 cq_handle;
468 u32 new_depth;
469 u64 new_msg_pool;
470} __attribute__((packed)) ;
471
472struct c2wr_cq_modify_rep {
473 struct c2wr_hdr hdr;
474} __attribute__((packed)) ;
475
476union c2wr_cq_modify {
477 struct c2wr_cq_modify_req req;
478 struct c2wr_cq_modify_rep rep;
479} __attribute__((packed)) ;
480
481struct c2wr_cq_destroy_req {
482 struct c2wr_hdr hdr;
483 u32 rnic_handle;
484 u32 cq_handle;
485} __attribute__((packed)) ;
486
487struct c2wr_cq_destroy_rep {
488 struct c2wr_hdr hdr;
489} __attribute__((packed)) ;
490
491union c2wr_cq_destroy {
492 struct c2wr_cq_destroy_req req;
493 struct c2wr_cq_destroy_rep rep;
494} __attribute__((packed)) ;
495
496/*
497 *------------------------ PD ------------------------
498 */
499struct c2wr_pd_alloc_req {
500 struct c2wr_hdr hdr;
501 u32 rnic_handle;
502 u32 pd_id;
503} __attribute__((packed)) ;
504
505struct c2wr_pd_alloc_rep {
506 struct c2wr_hdr hdr;
507} __attribute__((packed)) ;
508
509union c2wr_pd_alloc {
510 struct c2wr_pd_alloc_req req;
511 struct c2wr_pd_alloc_rep rep;
512} __attribute__((packed)) ;
513
514struct c2wr_pd_dealloc_req {
515 struct c2wr_hdr hdr;
516 u32 rnic_handle;
517 u32 pd_id;
518} __attribute__((packed)) ;
519
520struct c2wr_pd_dealloc_rep {
521 struct c2wr_hdr hdr;
522} __attribute__((packed)) ;
523
524union c2wr_pd_dealloc {
525 struct c2wr_pd_dealloc_req req;
526 struct c2wr_pd_dealloc_rep rep;
527} __attribute__((packed)) ;
528
529/*
530 *------------------------ SRQ ------------------------
531 */
532struct c2wr_srq_create_req {
533 struct c2wr_hdr hdr;
534 u64 shared_ht;
535 u64 user_context;
536 u32 rnic_handle;
537 u32 srq_depth;
538 u32 srq_limit;
539 u32 sgl_depth;
540 u32 pd_id;
541} __attribute__((packed)) ;
542
543struct c2wr_srq_create_rep {
544 struct c2wr_hdr hdr;
545 u32 srq_depth;
546 u32 sgl_depth;
547 u32 msg_size;
548 u32 mq_index;
549 u32 mq_start;
550 u32 srq_handle;
551} __attribute__((packed)) ;
552
553union c2wr_srq_create {
554 struct c2wr_srq_create_req req;
555 struct c2wr_srq_create_rep rep;
556} __attribute__((packed)) ;
557
558struct c2wr_srq_destroy_req {
559 struct c2wr_hdr hdr;
560 u32 rnic_handle;
561 u32 srq_handle;
562} __attribute__((packed)) ;
563
564struct c2wr_srq_destroy_rep {
565 struct c2wr_hdr hdr;
566} __attribute__((packed)) ;
567
568union c2wr_srq_destroy {
569 struct c2wr_srq_destroy_req req;
570 struct c2wr_srq_destroy_rep rep;
571} __attribute__((packed)) ;
572
573/*
574 *------------------------ QP ------------------------
575 */
576enum c2wr_qp_flags {
577 QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */
578 QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */
579 QP_MW_BIND = 0x00000004, /* MWs enabled */
580 QP_ZERO_STAG = 0x00000008, /* enabled? */
581 QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */
582 QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */
583 /* enabled? */
584};
585
586struct c2wr_qp_create_req {
587 struct c2wr_hdr hdr;
588 u64 shared_sq_ht;
589 u64 shared_rq_ht;
590 u64 user_context;
591 u32 rnic_handle;
592 u32 sq_cq_handle;
593 u32 rq_cq_handle;
594 u32 sq_depth;
595 u32 rq_depth;
596 u32 srq_handle;
597 u32 srq_limit;
598 u32 flags; /* see enum c2wr_qp_flags */
599 u32 send_sgl_depth;
600 u32 recv_sgl_depth;
601 u32 rdma_write_sgl_depth;
602 u32 ord;
603 u32 ird;
604 u32 pd_id;
605} __attribute__((packed)) ;
606
607struct c2wr_qp_create_rep {
608 struct c2wr_hdr hdr;
609 u32 sq_depth;
610 u32 rq_depth;
611 u32 send_sgl_depth;
612 u32 recv_sgl_depth;
613 u32 rdma_write_sgl_depth;
614 u32 ord;
615 u32 ird;
616 u32 sq_msg_size;
617 u32 sq_mq_index;
618 u32 sq_mq_start;
619 u32 rq_msg_size;
620 u32 rq_mq_index;
621 u32 rq_mq_start;
622 u32 qp_handle;
623} __attribute__((packed)) ;
624
625union c2wr_qp_create {
626 struct c2wr_qp_create_req req;
627 struct c2wr_qp_create_rep rep;
628} __attribute__((packed)) ;
629
630struct c2wr_qp_query_req {
631 struct c2wr_hdr hdr;
632 u32 rnic_handle;
633 u32 qp_handle;
634} __attribute__((packed)) ;
635
636struct c2wr_qp_query_rep {
637 struct c2wr_hdr hdr;
638 u64 user_context;
639 u32 rnic_handle;
640 u32 sq_depth;
641 u32 rq_depth;
642 u32 send_sgl_depth;
643 u32 rdma_write_sgl_depth;
644 u32 recv_sgl_depth;
645 u32 ord;
646 u32 ird;
647 u16 qp_state;
648 u16 flags; /* see c2wr_qp_flags_t */
649 u32 qp_id;
650 u32 local_addr;
651 u32 remote_addr;
652 u16 local_port;
653 u16 remote_port;
654 u32 terminate_msg_length; /* 0 if not present */
655 u8 data[0];
656 /* Terminate Message in-line here. */
657} __attribute__((packed)) ;
658
659union c2wr_qp_query {
660 struct c2wr_qp_query_req req;
661 struct c2wr_qp_query_rep rep;
662} __attribute__((packed)) ;
663
664struct c2wr_qp_modify_req {
665 struct c2wr_hdr hdr;
666 u64 stream_msg;
667 u32 stream_msg_length;
668 u32 rnic_handle;
669 u32 qp_handle;
670 u32 next_qp_state;
671 u32 ord;
672 u32 ird;
673 u32 sq_depth;
674 u32 rq_depth;
675 u32 llp_ep_handle;
676} __attribute__((packed)) ;
677
678struct c2wr_qp_modify_rep {
679 struct c2wr_hdr hdr;
680 u32 ord;
681 u32 ird;
682 u32 sq_depth;
683 u32 rq_depth;
684 u32 sq_msg_size;
685 u32 sq_mq_index;
686 u32 sq_mq_start;
687 u32 rq_msg_size;
688 u32 rq_mq_index;
689 u32 rq_mq_start;
690} __attribute__((packed)) ;
691
692union c2wr_qp_modify {
693 struct c2wr_qp_modify_req req;
694 struct c2wr_qp_modify_rep rep;
695} __attribute__((packed)) ;
696
697struct c2wr_qp_destroy_req {
698 struct c2wr_hdr hdr;
699 u32 rnic_handle;
700 u32 qp_handle;
701} __attribute__((packed)) ;
702
703struct c2wr_qp_destroy_rep {
704 struct c2wr_hdr hdr;
705} __attribute__((packed)) ;
706
707union c2wr_qp_destroy {
708 struct c2wr_qp_destroy_req req;
709 struct c2wr_qp_destroy_rep rep;
710} __attribute__((packed)) ;
711
712/*
713 * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can
714 * only be posted when a QP is in IDLE state. After the connect request is
715 * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
716 * No synchronous reply from adapter to this WR. The results of
717 * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
718 * See c2wr_ae_active_connect_results_t
719 */
720struct c2wr_qp_connect_req {
721 struct c2wr_hdr hdr;
722 u32 rnic_handle;
723 u32 qp_handle;
724 u32 remote_addr;
725 u16 remote_port;
726 u16 pad;
727 u32 private_data_length;
728 u8 private_data[0]; /* Private data in-line. */
729} __attribute__((packed)) ;
730
731struct c2wr_qp_connect {
732 struct c2wr_qp_connect_req req;
733 /* no synchronous reply. */
734} __attribute__((packed)) ;
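To make the in-line private_data layout above concrete, here is a hypothetical sketch of how a connect request carrying private data could be sized and filled. The allocation strategy, names, and byte-order handling are illustrative only and are not taken from this driver.

/* Hypothetical: build a CCWR_QP_CONNECT request with in-line private data. */
static struct c2wr_qp_connect_req *
example_build_connect(u32 rnic_handle, u32 qp_handle, u32 remote_addr,
		      u16 remote_port, const void *pdata, u32 pdata_len)
{
	struct c2wr_qp_connect_req *wr;

	if (pdata_len > C2_MAX_PRIVATE_DATA_SIZE)
		return NULL;

	/* private_data[] is declared in-line, so allocate header + payload. */
	wr = kzalloc(sizeof(*wr) + pdata_len, GFP_KERNEL);
	if (!wr)
		return NULL;

	c2_wr_set_id(wr, CCWR_QP_CONNECT);
	wr->rnic_handle = rnic_handle;
	wr->qp_handle = qp_handle;
	wr->remote_addr = remote_addr;
	wr->remote_port = remote_port;
	wr->private_data_length = pdata_len;
	memcpy(wr->private_data, pdata, pdata_len);
	return wr;
}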
735
736
737/*
738 *------------------------ MM ------------------------
739 */
740
741struct c2wr_nsmr_stag_alloc_req {
742 struct c2wr_hdr hdr;
743 u32 rnic_handle;
744 u32 pbl_depth;
745 u32 pd_id;
746 u32 flags;
747} __attribute__((packed)) ;
748
749struct c2wr_nsmr_stag_alloc_rep {
750 struct c2wr_hdr hdr;
751 u32 pbl_depth;
752 u32 stag_index;
753} __attribute__((packed)) ;
754
755union c2wr_nsmr_stag_alloc {
756 struct c2wr_nsmr_stag_alloc_req req;
757 struct c2wr_nsmr_stag_alloc_rep rep;
758} __attribute__((packed)) ;
759
760struct c2wr_nsmr_register_req {
761 struct c2wr_hdr hdr;
762 u64 va;
763 u32 rnic_handle;
764 u16 flags;
765 u8 stag_key;
766 u8 pad;
767 u32 pd_id;
768 u32 pbl_depth;
769 u32 pbe_size;
770 u32 fbo;
771 u32 length;
772 u32 addrs_length;
773 /* array of paddrs (must be aligned on a 64bit boundary) */
774 u64 paddrs[0];
775} __attribute__((packed)) ;
776
777struct c2wr_nsmr_register_rep {
778 struct c2wr_hdr hdr;
779 u32 pbl_depth;
780 u32 stag_index;
781} __attribute__((packed)) ;
782
783union c2wr_nsmr_register {
784 struct c2wr_nsmr_register_req req;
785 struct c2wr_nsmr_register_rep rep;
786} __attribute__((packed)) ;
787
788struct c2wr_nsmr_pbl_req {
789 struct c2wr_hdr hdr;
790 u32 rnic_handle;
791 u32 flags;
792 u32 stag_index;
793 u32 addrs_length;
794 /* array of paddrs (must be aligned on a 64bit boundary) */
795 u64 paddrs[0];
796} __attribute__((packed)) ;
797
798struct c2wr_nsmr_pbl_rep {
799 struct c2wr_hdr hdr;
800} __attribute__((packed)) ;
801
802union c2wr_nsmr_pbl {
803 struct c2wr_nsmr_pbl_req req;
804 struct c2wr_nsmr_pbl_rep rep;
805} __attribute__((packed)) ;
806
807struct c2wr_mr_query_req {
808 struct c2wr_hdr hdr;
809 u32 rnic_handle;
810 u32 stag_index;
811} __attribute__((packed)) ;
812
813struct c2wr_mr_query_rep {
814 struct c2wr_hdr hdr;
815 u8 stag_key;
816 u8 pad[3];
817 u32 pd_id;
818 u32 flags;
819 u32 pbl_depth;
820} __attribute__((packed)) ;
821
822union c2wr_mr_query {
823 struct c2wr_mr_query_req req;
824 struct c2wr_mr_query_rep rep;
825} __attribute__((packed)) ;
826
827struct c2wr_mw_query_req {
828 struct c2wr_hdr hdr;
829 u32 rnic_handle;
830 u32 stag_index;
831} __attribute__((packed)) ;
832
833struct c2wr_mw_query_rep {
834 struct c2wr_hdr hdr;
835 u8 stag_key;
836 u8 pad[3];
837 u32 pd_id;
838 u32 flags;
839} __attribute__((packed)) ;
840
841union c2wr_mw_query {
842 struct c2wr_mw_query_req req;
843 struct c2wr_mw_query_rep rep;
844} __attribute__((packed)) ;
845
846
847struct c2wr_stag_dealloc_req {
848 struct c2wr_hdr hdr;
849 u32 rnic_handle;
850 u32 stag_index;
851} __attribute__((packed)) ;
852
853struct c2wr_stag_dealloc_rep {
854 struct c2wr_hdr hdr;
855} __attribute__((packed)) ;
856
857union c2wr_stag_dealloc {
858 struct c2wr_stag_dealloc_req req;
859 struct c2wr_stag_dealloc_rep rep;
860} __attribute__((packed)) ;
861
862struct c2wr_nsmr_reregister_req {
863 struct c2wr_hdr hdr;
864 u64 va;
865 u32 rnic_handle;
866 u16 flags;
867 u8 stag_key;
868 u8 pad;
869 u32 stag_index;
870 u32 pd_id;
871 u32 pbl_depth;
872 u32 pbe_size;
873 u32 fbo;
874 u32 length;
875 u32 addrs_length;
876 u32 pad1;
877 /* array of paddrs (must be aligned on a 64bit boundary) */
878 u64 paddrs[0];
879} __attribute__((packed)) ;
880
881struct c2wr_nsmr_reregister_rep {
882 struct c2wr_hdr hdr;
883 u32 pbl_depth;
884 u32 stag_index;
885} __attribute__((packed)) ;
886
887union c2wr_nsmr_reregister {
888 struct c2wr_nsmr_reregister_req req;
889 struct c2wr_nsmr_reregister_rep rep;
890} __attribute__((packed)) ;
891
892struct c2wr_smr_register_req {
893 struct c2wr_hdr hdr;
894 u64 va;
895 u32 rnic_handle;
896 u16 flags;
897 u8 stag_key;
898 u8 pad;
899 u32 stag_index;
900 u32 pd_id;
901} __attribute__((packed)) ;
902
903struct c2wr_smr_register_rep {
904 struct c2wr_hdr hdr;
905 u32 stag_index;
906} __attribute__((packed)) ;
907
908union c2wr_smr_register {
909 struct c2wr_smr_register_req req;
910 struct c2wr_smr_register_rep rep;
911} __attribute__((packed)) ;
912
913struct c2wr_mw_alloc_req {
914 struct c2wr_hdr hdr;
915 u32 rnic_handle;
916 u32 pd_id;
917} __attribute__((packed)) ;
918
919struct c2wr_mw_alloc_rep {
920 struct c2wr_hdr hdr;
921 u32 stag_index;
922} __attribute__((packed)) ;
923
924union c2wr_mw_alloc {
925 struct c2wr_mw_alloc_req req;
926 struct c2wr_mw_alloc_rep rep;
927} __attribute__((packed)) ;
928
929/*
930 *------------------------ WRs -----------------------
931 */
932
933struct c2wr_user_hdr {
934 struct c2wr_hdr hdr; /* Has status and WR Type */
935} __attribute__((packed)) ;
936
937enum c2_qp_state {
938 C2_QP_STATE_IDLE = 0x01,
939 C2_QP_STATE_CONNECTING = 0x02,
940 C2_QP_STATE_RTS = 0x04,
941 C2_QP_STATE_CLOSING = 0x08,
942 C2_QP_STATE_TERMINATE = 0x10,
943 C2_QP_STATE_ERROR = 0x20,
944};
945
946/* Completion queue entry. */
947struct c2wr_ce {
948 struct c2wr_hdr hdr; /* Has status and WR Type */
949 u64 qp_user_context; /* c2_user_qp_t * */
950 u32 qp_state; /* Current QP State */
951 u32 handle; /* QPID or EP Handle */
952 u32 bytes_rcvd; /* valid for RECV WCs */
953 u32 stag;
954} __attribute__((packed)) ;
955
956
957/*
958 * Flags used for all post-sq WRs. These must fit in the flags
959 * field of the struct c2wr_hdr (eight bits).
960 */
961enum {
962 SQ_SIGNALED = 0x01,
963 SQ_READ_FENCE = 0x02,
964 SQ_FENCE = 0x04,
965};
966
967/*
968 * Common fields for all post-sq WRs. Namely the standard header and a
969 * secondary header with fields common to all post-sq WRs.
970 */
971struct c2_sq_hdr {
972 struct c2wr_user_hdr user_hdr;
973} __attribute__((packed));
974
975/*
976 * Same as above but for post-rq WRs.
977 */
978struct c2_rq_hdr {
979 struct c2wr_user_hdr user_hdr;
980} __attribute__((packed));
981
982/*
983 * use the same struct for all sends.
984 */
985struct c2wr_send_req {
986 struct c2_sq_hdr sq_hdr;
987 u32 sge_len;
988 u32 remote_stag;
989 u8 data[0]; /* SGE array */
990} __attribute__((packed));
991
992union c2wr_send {
993 struct c2wr_send_req req;
994 struct c2wr_ce rep;
995} __attribute__((packed));
996
997struct c2wr_rdma_write_req {
998 struct c2_sq_hdr sq_hdr;
999 u64 remote_to;
1000 u32 remote_stag;
1001 u32 sge_len;
1002 u8 data[0]; /* SGE array */
1003} __attribute__((packed));
1004
1005union c2wr_rdma_write {
1006 struct c2wr_rdma_write_req req;
1007 struct c2wr_ce rep;
1008} __attribute__((packed));
1009
1010struct c2wr_rdma_read_req {
1011 struct c2_sq_hdr sq_hdr;
1012 u64 local_to;
1013 u64 remote_to;
1014 u32 local_stag;
1015 u32 remote_stag;
1016 u32 length;
1017} __attribute__((packed));
1018
1019union c2wr_rdma_read {
1020 struct c2wr_rdma_read_req req;
1021 struct c2wr_ce rep;
1022} __attribute__((packed));
1023
1024struct c2wr_mw_bind_req {
1025 struct c2_sq_hdr sq_hdr;
1026 u64 va;
1027 u8 stag_key;
1028 u8 pad[3];
1029 u32 mw_stag_index;
1030 u32 mr_stag_index;
1031 u32 length;
1032 u32 flags;
1033} __attribute__((packed));
1034
1035union c2wr_mw_bind {
1036 struct c2wr_mw_bind_req req;
1037 struct c2wr_ce rep;
1038} __attribute__((packed));
1039
1040struct c2wr_nsmr_fastreg_req {
1041 struct c2_sq_hdr sq_hdr;
1042 u64 va;
1043 u8 stag_key;
1044 u8 pad[3];
1045 u32 stag_index;
1046 u32 pbe_size;
1047 u32 fbo;
1048 u32 length;
1049 u32 addrs_length;
1050 /* array of paddrs (must be aligned on a 64bit boundary) */
1051 u64 paddrs[0];
1052} __attribute__((packed));
1053
1054union c2wr_nsmr_fastreg {
1055 struct c2wr_nsmr_fastreg_req req;
1056 struct c2wr_ce rep;
1057} __attribute__((packed));
1058
1059struct c2wr_stag_invalidate_req {
1060 struct c2_sq_hdr sq_hdr;
1061 u8 stag_key;
1062 u8 pad[3];
1063 u32 stag_index;
1064} __attribute__((packed));
1065
1066union c2wr_stag_invalidate {
1067 struct c2wr_stag_invalidate_req req;
1068 struct c2wr_ce rep;
1069} __attribute__((packed));
1070
1071union c2wr_sqwr {
1072 struct c2_sq_hdr sq_hdr;
1073 struct c2wr_send_req send;
1074 struct c2wr_send_req send_se;
1075 struct c2wr_send_req send_inv;
1076 struct c2wr_send_req send_se_inv;
1077 struct c2wr_rdma_write_req rdma_write;
1078 struct c2wr_rdma_read_req rdma_read;
1079 struct c2wr_mw_bind_req mw_bind;
1080 struct c2wr_nsmr_fastreg_req nsmr_fastreg;
1081 struct c2wr_stag_invalidate_req stag_inv;
1082} __attribute__((packed));
1083
1084
1085/*
1086 * RQ WRs
1087 */
1088struct c2wr_rqwr {
1089 struct c2_rq_hdr rq_hdr;
1090 u8 data[0]; /* array of SGEs */
1091} __attribute__((packed));
1092
1093union c2wr_recv {
1094 struct c2wr_rqwr req;
1095 struct c2wr_ce rep;
1096} __attribute__((packed));
1097
1098/*
1099 * All AEs start with this header. Most AEs only need to convey the
1100 * information in the header. Some, like LLP connection events, need
1101 * more info. The union typedef c2wr_ae_t has all the possible AEs.
1102 *
1103 * hdr.context is the user_context from the rnic_open WR. NULL if this
1104 * is not affiliated with an rnic
1105 *
1106 * hdr.id is the AE identifier (eg; CCAE_REMOTE_SHUTDOWN,
1107 * CCAE_LLP_CLOSE_COMPLETE)
1108 *
1109 * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
1110 *
1111 * user_context is the context passed down when the host created the resource.
1112 */
1113struct c2wr_ae_hdr {
1114 struct c2wr_hdr hdr;
1115 u64 user_context; /* user context for this res. */
1116 u32 resource_type; /* see enum c2_resource_indicator */
1117 u32 resource; /* handle for resource */
1118 u32 qp_state; /* current QP State */
1119} __attribute__((packed));
1120
1121/*
1122 * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
1123 * the adapter moves the QP into RTS state
1124 */
1125struct c2wr_ae_active_connect_results {
1126 struct c2wr_ae_hdr ae_hdr;
1127 u32 laddr;
1128 u32 raddr;
1129 u16 lport;
1130 u16 rport;
1131 u32 private_data_length;
1132 u8 private_data[0]; /* data is in-line in the msg. */
1133} __attribute__((packed));
1134
1135/*
1136 * When connections are established by the stack (and the private data
1137 * MPA frame is received), the adapter will generate an event to the host.
1138 * The details of the connection, any private data, and the new connection
1139 * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
1140 * AE queue:
1141 */
1142struct c2wr_ae_connection_request {
1143 struct c2wr_ae_hdr ae_hdr;
1144 u32 cr_handle; /* connreq handle (sock ptr) */
1145 u32 laddr;
1146 u32 raddr;
1147 u16 lport;
1148 u16 rport;
1149 u32 private_data_length;
1150 u8 private_data[0]; /* data is in-line in the msg. */
1151} __attribute__((packed));
1152
1153union c2wr_ae {
1154 struct c2wr_ae_hdr ae_generic;
1155 struct c2wr_ae_active_connect_results ae_active_connect_results;
1156 struct c2wr_ae_connection_request ae_connection_request;
1157} __attribute__((packed));
1158
1159struct c2wr_init_req {
1160 struct c2wr_hdr hdr;
1161 u64 hint_count;
1162 u64 q0_host_shared;
1163 u64 q1_host_shared;
1164 u64 q1_host_msg_pool;
1165 u64 q2_host_shared;
1166 u64 q2_host_msg_pool;
1167} __attribute__((packed));
1168
1169struct c2wr_init_rep {
1170 struct c2wr_hdr hdr;
1171} __attribute__((packed));
1172
1173union c2wr_init {
1174 struct c2wr_init_req req;
1175 struct c2wr_init_rep rep;
1176} __attribute__((packed));
1177
1178/*
1179 * For upgrading flash.
1180 */
1181
1182struct c2wr_flash_init_req {
1183 struct c2wr_hdr hdr;
1184 u32 rnic_handle;
1185} __attribute__((packed));
1186
1187struct c2wr_flash_init_rep {
1188 struct c2wr_hdr hdr;
1189 u32 adapter_flash_buf_offset;
1190 u32 adapter_flash_len;
1191} __attribute__((packed));
1192
1193union c2wr_flash_init {
1194 struct c2wr_flash_init_req req;
1195 struct c2wr_flash_init_rep rep;
1196} __attribute__((packed));
1197
1198struct c2wr_flash_req {
1199 struct c2wr_hdr hdr;
1200 u32 rnic_handle;
1201 u32 len;
1202} __attribute__((packed));
1203
1204struct c2wr_flash_rep {
1205 struct c2wr_hdr hdr;
1206 u32 status;
1207} __attribute__((packed));
1208
1209union c2wr_flash {
1210 struct c2wr_flash_req req;
1211 struct c2wr_flash_rep rep;
1212} __attribute__((packed));
1213
1214struct c2wr_buf_alloc_req {
1215 struct c2wr_hdr hdr;
1216 u32 rnic_handle;
1217 u32 size;
1218} __attribute__((packed));
1219
1220struct c2wr_buf_alloc_rep {
1221 struct c2wr_hdr hdr;
1222 u32 offset; /* 0 if mem not available */
1223 u32 size; /* 0 if mem not available */
1224} __attribute__((packed));
1225
1226union c2wr_buf_alloc {
1227 struct c2wr_buf_alloc_req req;
1228 struct c2wr_buf_alloc_rep rep;
1229} __attribute__((packed));
1230
1231struct c2wr_buf_free_req {
1232 struct c2wr_hdr hdr;
1233 u32 rnic_handle;
1234 u32 offset; /* Must match value from alloc */
1235 u32 size; /* Must match value from alloc */
1236} __attribute__((packed));
1237
1238struct c2wr_buf_free_rep {
1239 struct c2wr_hdr hdr;
1240} __attribute__((packed));
1241
1242union c2wr_buf_free {
1243 struct c2wr_buf_free_req req;
1244 struct c2wr_ce rep;
1245} __attribute__((packed));
1246
1247struct c2wr_flash_write_req {
1248 struct c2wr_hdr hdr;
1249 u32 rnic_handle;
1250 u32 offset;
1251 u32 size;
1252 u32 type;
1253 u32 flags;
1254} __attribute__((packed));
1255
1256struct c2wr_flash_write_rep {
1257 struct c2wr_hdr hdr;
1258 u32 status;
1259} __attribute__((packed));
1260
1261union c2wr_flash_write {
1262 struct c2wr_flash_write_req req;
1263 struct c2wr_flash_write_rep rep;
1264} __attribute__((packed));
1265
1266/*
1267 * Messages for LLP connection setup.
1268 */
1269
1270/*
1271 * Listen Request. This allocates a listening endpoint to allow passive
1272 * connection setup. Newly established LLP connections are passed up
1273 * via an AE. See c2wr_ae_connection_request_t
1274 */
1275struct c2wr_ep_listen_create_req {
1276 struct c2wr_hdr hdr;
1277 u64 user_context; /* returned in AEs. */
1278 u32 rnic_handle;
1279 u32 local_addr; /* local addr, or 0 */
1280 u16 local_port; /* 0 means "pick one" */
1281 u16 pad;
1282	u32 backlog;		/* traditional TCP listen backlog */
1283} __attribute__((packed));
1284
1285struct c2wr_ep_listen_create_rep {
1286 struct c2wr_hdr hdr;
1287 u32 ep_handle; /* handle to new listening ep */
1288 u16 local_port; /* resulting port... */
1289 u16 pad;
1290} __attribute__((packed));
1291
1292union c2wr_ep_listen_create {
1293 struct c2wr_ep_listen_create_req req;
1294 struct c2wr_ep_listen_create_rep rep;
1295} __attribute__((packed));
1296
1297struct c2wr_ep_listen_destroy_req {
1298 struct c2wr_hdr hdr;
1299 u32 rnic_handle;
1300 u32 ep_handle;
1301} __attribute__((packed));
1302
1303struct c2wr_ep_listen_destroy_rep {
1304 struct c2wr_hdr hdr;
1305} __attribute__((packed));
1306
1307union c2wr_ep_listen_destroy {
1308 struct c2wr_ep_listen_destroy_req req;
1309 struct c2wr_ep_listen_destroy_rep rep;
1310} __attribute__((packed));
1311
1312struct c2wr_ep_query_req {
1313 struct c2wr_hdr hdr;
1314 u32 rnic_handle;
1315 u32 ep_handle;
1316} __attribute__((packed));
1317
1318struct c2wr_ep_query_rep {
1319 struct c2wr_hdr hdr;
1320 u32 rnic_handle;
1321 u32 local_addr;
1322 u32 remote_addr;
1323 u16 local_port;
1324 u16 remote_port;
1325} __attribute__((packed));
1326
1327union c2wr_ep_query {
1328 struct c2wr_ep_query_req req;
1329 struct c2wr_ep_query_rep rep;
1330} __attribute__((packed));
1331
1332
1333/*
1334 * The host passes this down to indicate acceptance of a pending iWARP
1335 * connection. The cr_handle was obtained from the CONNECTION_REQUEST
1336 * AE passed up by the adapter. See c2wr_ae_connection_request_t.
1337 */
1338struct c2wr_cr_accept_req {
1339 struct c2wr_hdr hdr;
1340 u32 rnic_handle;
1341 u32 qp_handle; /* QP to bind to this LLP conn */
1342 u32 ep_handle; /* LLP handle to accept */
1343 u32 private_data_length;
1344 u8 private_data[0]; /* data in-line in msg. */
1345} __attribute__((packed));
1346
1347/*
1348 * adapter sends reply when private data is successfully submitted to
1349 * the LLP.
1350 */
1351struct c2wr_cr_accept_rep {
1352 struct c2wr_hdr hdr;
1353} __attribute__((packed));
1354
1355union c2wr_cr_accept {
1356 struct c2wr_cr_accept_req req;
1357 struct c2wr_cr_accept_rep rep;
1358} __attribute__((packed));
1359
1360/*
1361 * The host sends this down if a given iWARP connection request was
1362 * rejected by the consumer. The cr_handle was obtained from a
1363 * previous c2wr_ae_connection_request_t AE sent by the adapter.
1364 */
1365struct c2wr_cr_reject_req {
1366 struct c2wr_hdr hdr;
1367 u32 rnic_handle;
1368 u32 ep_handle; /* LLP handle to reject */
1369} __attribute__((packed));
1370
1371/*
1372 * Dunno if this is needed, but we'll add it for now. The adapter will
1373 * send the reject_reply after the LLP endpoint has been destroyed.
1374 */
1375struct c2wr_cr_reject_rep {
1376 struct c2wr_hdr hdr;
1377} __attribute__((packed));
1378
1379union c2wr_cr_reject {
1380 struct c2wr_cr_reject_req req;
1381 struct c2wr_cr_reject_rep rep;
1382} __attribute__((packed));
1383
1384/*
1385 * console command. Used to implement a debug console over the verbs
1386 * request and reply queues.
1387 */
1388
1389/*
1390 * Console request message. It contains:
1391 * - message hdr with id = CCWR_CONSOLE
1392 * - the physaddr/len of host memory to be used for the reply.
1393 * - the command string. eg: "netstat -s" or "zoneinfo"
1394 */
1395struct c2wr_console_req {
1396 struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
1397 u64 reply_buf; /* pinned host buf for reply */
1398 u32 reply_buf_len; /* length of reply buffer */
1399 u8 command[0]; /* NUL terminated ascii string */
1400 /* containing the command req */
1401} __attribute__((packed));
1402
1403/*
1404 * flags used in the console reply.
1405 */
1406enum c2_console_flags {
1407 CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
1408} __attribute__((packed));
1409
1410/*
1411 * Console reply message.
1412 * hdr.result contains the c2_status_t error if the reply was _not_ generated,
1413 * or C2_OK if the reply was generated.
1414 */
1415struct c2wr_console_rep {
1416 struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
1417 u32 flags;
1418} __attribute__((packed));
1419
1420union c2wr_console {
1421 struct c2wr_console_req req;
1422 struct c2wr_console_rep rep;
1423} __attribute__((packed));
1424
1425
1426/*
1427 * Giant union with all WRs. Makes life easier...
1428 */
1429union c2wr {
1430 struct c2wr_hdr hdr;
1431 struct c2wr_user_hdr user_hdr;
1432 union c2wr_rnic_open rnic_open;
1433 union c2wr_rnic_query rnic_query;
1434 union c2wr_rnic_getconfig rnic_getconfig;
1435 union c2wr_rnic_setconfig rnic_setconfig;
1436 union c2wr_rnic_close rnic_close;
1437 union c2wr_cq_create cq_create;
1438 union c2wr_cq_modify cq_modify;
1439 union c2wr_cq_destroy cq_destroy;
1440 union c2wr_pd_alloc pd_alloc;
1441 union c2wr_pd_dealloc pd_dealloc;
1442 union c2wr_srq_create srq_create;
1443 union c2wr_srq_destroy srq_destroy;
1444 union c2wr_qp_create qp_create;
1445 union c2wr_qp_query qp_query;
1446 union c2wr_qp_modify qp_modify;
1447 union c2wr_qp_destroy qp_destroy;
1448 struct c2wr_qp_connect qp_connect;
1449 union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
1450 union c2wr_nsmr_register nsmr_register;
1451 union c2wr_nsmr_pbl nsmr_pbl;
1452 union c2wr_mr_query mr_query;
1453 union c2wr_mw_query mw_query;
1454 union c2wr_stag_dealloc stag_dealloc;
1455 union c2wr_sqwr sqwr;
1456 struct c2wr_rqwr rqwr;
1457 struct c2wr_ce ce;
1458 union c2wr_ae ae;
1459 union c2wr_init init;
1460 union c2wr_ep_listen_create ep_listen_create;
1461 union c2wr_ep_listen_destroy ep_listen_destroy;
1462 union c2wr_cr_accept cr_accept;
1463 union c2wr_cr_reject cr_reject;
1464 union c2wr_console console;
1465 union c2wr_flash_init flash_init;
1466 union c2wr_flash flash;
1467 union c2wr_buf_alloc buf_alloc;
1468 union c2wr_buf_free buf_free;
1469 union c2wr_flash_write flash_write;
1470} __attribute__((packed));
1471
1472
1473/*
1474 * Accessors for the wr fields that are packed together tightly to
1475 * reduce the wr message size. The wr arguments are void* so that
1476 * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
1477 * in the struct c2wr union can be passed in.
1478 */
1479static __inline__ u8 c2_wr_get_id(void *wr)
1480{
1481 return ((struct c2wr_hdr *) wr)->id;
1482}
1483static __inline__ void c2_wr_set_id(void *wr, u8 id)
1484{
1485 ((struct c2wr_hdr *) wr)->id = id;
1486}
1487static __inline__ u8 c2_wr_get_result(void *wr)
1488{
1489 return ((struct c2wr_hdr *) wr)->result;
1490}
1491static __inline__ void c2_wr_set_result(void *wr, u8 result)
1492{
1493 ((struct c2wr_hdr *) wr)->result = result;
1494}
1495static __inline__ u8 c2_wr_get_flags(void *wr)
1496{
1497 return ((struct c2wr_hdr *) wr)->flags;
1498}
1499static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
1500{
1501 ((struct c2wr_hdr *) wr)->flags = flags;
1502}
1503static __inline__ u8 c2_wr_get_sge_count(void *wr)
1504{
1505 return ((struct c2wr_hdr *) wr)->sge_count;
1506}
1507static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
1508{
1509 ((struct c2wr_hdr *) wr)->sge_count = sge_count;
1510}
1511static __inline__ u32 c2_wr_get_wqe_count(void *wr)
1512{
1513 return ((struct c2wr_hdr *) wr)->wqe_count;
1514}
1515static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
1516{
1517 ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
1518}
1519
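Because every message begins with struct c2wr_hdr, the accessors above work on a pointer to any WR type. A short usage sketch (editor's illustration; the debug helper is hypothetical and assumes <linux/kernel.h> for printk):

static void c2_wr_dump_hdr(void *wr)
{
	printk(KERN_DEBUG "c2 wr: id=%u result=%u flags=%u sge_count=%u wqe_count=%u\n",
	       c2_wr_get_id(wr), c2_wr_get_result(wr), c2_wr_get_flags(wr),
	       c2_wr_get_sge_count(wr), c2_wr_get_wqe_count(wr));
}
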
1520#endif /* _C2_WR_H_ */
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig
new file mode 100644
index 000000000000..922389b64394
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/Kconfig
@@ -0,0 +1,16 @@
1config INFINIBAND_EHCA
2 tristate "eHCA support"
3 depends on IBMEBUS && INFINIBAND
4 ---help---
5 This driver supports the IBM pSeries eHCA InfiniBand adapter.
6
7 To compile the driver as a module, choose M here. The module
8 will be called ib_ehca.
9
10config INFINIBAND_EHCA_SCALING
11 bool "Scaling support (EXPERIMENTAL)"
12 depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
13 ---help---
14 eHCA scaling support schedules the CQ callbacks to different CPUs.
15
16 To enable this feature choose Y here.
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/infiniband/hw/ehca/Makefile
new file mode 100644
index 000000000000..74d284e46a40
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/Makefile
@@ -0,0 +1,16 @@
1# Authors: Heiko J Schick <schickhj@de.ibm.com>
2# Christoph Raisch <raisch@de.ibm.com>
3# Joachim Fenkes <fenkes@de.ibm.com>
4#
5# Copyright (c) 2005 IBM Corporation
6#
7# All rights reserved.
8#
9# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
10
11obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
12
13ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
14 ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
15 ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
16
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
new file mode 100644
index 000000000000..3bac197f9014
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -0,0 +1,271 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
 4 * address vector functions
5 *
6 * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
7 * Khadija Souissi <souissik@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Christoph Raisch <raisch@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44
45#include <asm/current.h>
46
47#include "ehca_tools.h"
48#include "ehca_iverbs.h"
49#include "hcp_if.h"
50
51static struct kmem_cache *av_cache;
52
53struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
54{
55 int ret;
56 struct ehca_av *av;
57 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
58 ib_device);
59
60 av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
61 if (!av) {
62 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
63 pd, ah_attr);
64 return ERR_PTR(-ENOMEM);
65 }
66
67 av->av.sl = ah_attr->sl;
68 av->av.dlid = ah_attr->dlid;
69 av->av.slid_path_bits = ah_attr->src_path_bits;
70
71 if (ehca_static_rate < 0) {
72 int ah_mult = ib_rate_to_mult(ah_attr->static_rate);
73 int ehca_mult =
 74			ib_rate_to_mult(shca->sport[ah_attr->port_num].rate);
75
76 if (ah_mult >= ehca_mult)
77 av->av.ipd = 0;
78 else
79 av->av.ipd = (ah_mult > 0) ?
80 ((ehca_mult - 1) / ah_mult) : 0;
81 } else
82 av->av.ipd = ehca_static_rate;
83
84 av->av.lnh = ah_attr->ah_flags;
85 av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
86 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
87 ah_attr->grh.traffic_class);
88 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
89 ah_attr->grh.flow_label);
90 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
91 ah_attr->grh.hop_limit);
92 av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
93 /* set sgid in grh.word_1 */
94 if (ah_attr->ah_flags & IB_AH_GRH) {
95 int rc;
96 struct ib_port_attr port_attr;
97 union ib_gid gid;
98 memset(&port_attr, 0, sizeof(port_attr));
99 rc = ehca_query_port(pd->device, ah_attr->port_num,
100 &port_attr);
101 if (rc) { /* invalid port number */
102 ret = -EINVAL;
103 ehca_err(pd->device, "Invalid port number "
104 "ehca_query_port() returned %x "
105 "pd=%p ah_attr=%p", rc, pd, ah_attr);
106 goto create_ah_exit1;
107 }
108 memset(&gid, 0, sizeof(gid));
109 rc = ehca_query_gid(pd->device,
110 ah_attr->port_num,
111 ah_attr->grh.sgid_index, &gid);
112 if (rc) {
113 ret = -EINVAL;
114 ehca_err(pd->device, "Failed to retrieve sgid "
115 "ehca_query_gid() returned %x "
116 "pd=%p ah_attr=%p", rc, pd, ah_attr);
117 goto create_ah_exit1;
118 }
119 memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
120 }
 121	/* for the time being we use a hard-coded PMTU of 2048 bytes */
122 av->av.pmtu = 4;
123
124 /* dgid comes in grh.word_3 */
125 memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
126 sizeof(ah_attr->grh.dgid));
127
128 return &av->ib_ah;
129
130create_ah_exit1:
131 kmem_cache_free(av_cache, av);
132
133 return ERR_PTR(ret);
134}
135
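Worked example for the static-rate computation above (editor's note; values taken from the standard IB rate and MTU enums): if the port runs at 30 Gb/s, ib_rate_to_mult() yields 12, and an address handle asking for 2.5 Gb/s yields a multiplier of 1, so the code sets ipd = (12 - 1) / 1 = 11, throttling transmission to one twelfth of the port rate. Likewise av.pmtu = 4 is the encoding of the hard-coded 2048-byte path MTU (IB_MTU_2048), matching the MTU switch in ehca_query_port() further below.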
136int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
137{
138 struct ehca_av *av;
139 struct ehca_ud_av new_ehca_av;
140 struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
141 u32 cur_pid = current->tgid;
142
143 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
144 my_pd->ownpid != cur_pid) {
145 ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
146 cur_pid, my_pd->ownpid);
147 return -EINVAL;
148 }
149
150 memset(&new_ehca_av, 0, sizeof(new_ehca_av));
151 new_ehca_av.sl = ah_attr->sl;
152 new_ehca_av.dlid = ah_attr->dlid;
153 new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
154 new_ehca_av.ipd = ah_attr->static_rate;
155 new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
156 (ah_attr->ah_flags & IB_AH_GRH) > 0);
157 new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
158 ah_attr->grh.traffic_class);
159 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
160 ah_attr->grh.flow_label);
161 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
162 ah_attr->grh.hop_limit);
163 new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
164
165 /* set sgid in grh.word_1 */
166 if (ah_attr->ah_flags & IB_AH_GRH) {
167 int rc;
168 struct ib_port_attr port_attr;
169 union ib_gid gid;
170 memset(&port_attr, 0, sizeof(port_attr));
171 rc = ehca_query_port(ah->device, ah_attr->port_num,
172 &port_attr);
173 if (rc) { /* invalid port number */
174 ehca_err(ah->device, "Invalid port number "
175 "ehca_query_port() returned %x "
176 "ah=%p ah_attr=%p port_num=%x",
177 rc, ah, ah_attr, ah_attr->port_num);
178 return -EINVAL;
179 }
180 memset(&gid, 0, sizeof(gid));
181 rc = ehca_query_gid(ah->device,
182 ah_attr->port_num,
183 ah_attr->grh.sgid_index, &gid);
184 if (rc) {
185 ehca_err(ah->device, "Failed to retrieve sgid "
186 "ehca_query_gid() returned %x "
187 "ah=%p ah_attr=%p port_num=%x "
188 "sgid_index=%x",
189 rc, ah, ah_attr, ah_attr->port_num,
190 ah_attr->grh.sgid_index);
191 return -EINVAL;
192 }
193 memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
194 }
195
196 new_ehca_av.pmtu = 4; /* see also comment in create_ah() */
197
198 memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
199 sizeof(ah_attr->grh.dgid));
200
201 av = container_of(ah, struct ehca_av, ib_ah);
202 av->av = new_ehca_av;
203
204 return 0;
205}
206
207int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
208{
209 struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
210 struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
211 u32 cur_pid = current->tgid;
212
213 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
214 my_pd->ownpid != cur_pid) {
215 ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
216 cur_pid, my_pd->ownpid);
217 return -EINVAL;
218 }
219
220 memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
221 sizeof(ah_attr->grh.dgid));
222 ah_attr->sl = av->av.sl;
223
224 ah_attr->dlid = av->av.dlid;
225
226 ah_attr->src_path_bits = av->av.slid_path_bits;
227 ah_attr->static_rate = av->av.ipd;
228 ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
229 ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
230 av->av.grh.word_0);
231 ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
232 av->av.grh.word_0);
233 ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
234 av->av.grh.word_0);
235
236 return 0;
237}
238
239int ehca_destroy_ah(struct ib_ah *ah)
240{
241 struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
242 u32 cur_pid = current->tgid;
243
244 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
245 my_pd->ownpid != cur_pid) {
246 ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
247 cur_pid, my_pd->ownpid);
248 return -EINVAL;
249 }
250
251 kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
252
253 return 0;
254}
255
256int ehca_init_av_cache(void)
257{
258 av_cache = kmem_cache_create("ehca_cache_av",
259 sizeof(struct ehca_av), 0,
260 SLAB_HWCACHE_ALIGN,
261 NULL, NULL);
262 if (!av_cache)
263 return -ENOMEM;
264 return 0;
265}
266
267void ehca_cleanup_av_cache(void)
268{
269 if (av_cache)
270 kmem_cache_destroy(av_cache);
271}
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
new file mode 100644
index 000000000000..1c722032319c
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -0,0 +1,346 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
 4 * Struct definitions for eHCA internal structures
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_CLASSES_H__
43#define __EHCA_CLASSES_H__
44
45#include "ehca_classes.h"
46#include "ipz_pt_fn.h"
47
48struct ehca_module;
49struct ehca_qp;
50struct ehca_cq;
51struct ehca_eq;
52struct ehca_mr;
53struct ehca_mw;
54struct ehca_pd;
55struct ehca_av;
56
57#ifdef CONFIG_PPC64
58#include "ehca_classes_pSeries.h"
59#endif
60
61#include <rdma/ib_verbs.h>
62#include <rdma/ib_user_verbs.h>
63
64#include "ehca_irq.h"
65
66struct ehca_eq {
67 u32 length;
68 struct ipz_queue ipz_queue;
69 struct ipz_eq_handle ipz_eq_handle;
70 struct work_struct work;
71 struct h_galpas galpas;
72 int is_initialized;
73 struct ehca_pfeq pf;
74 spinlock_t spinlock;
75 struct tasklet_struct interrupt_task;
76 u32 ist;
77};
78
79struct ehca_sport {
80 struct ib_cq *ibcq_aqp1;
81 struct ib_qp *ibqp_aqp1;
82 enum ib_rate rate;
83 enum ib_port_state port_state;
84};
85
86struct ehca_shca {
87 struct ib_device ib_device;
88 struct ibmebus_dev *ibmebus_dev;
89 u8 num_ports;
90 int hw_level;
91 struct list_head shca_list;
92 struct ipz_adapter_handle ipz_hca_handle;
93 struct ehca_sport sport[2];
94 struct ehca_eq eq;
95 struct ehca_eq neq;
96 struct ehca_mr *maxmr;
97 struct ehca_pd *pd;
98 struct h_galpas galpas;
99};
100
101struct ehca_pd {
102 struct ib_pd ib_pd;
103 struct ipz_pd fw_pd;
104 u32 ownpid;
105};
106
107struct ehca_qp {
108 struct ib_qp ib_qp;
109 u32 qp_type;
110 struct ipz_queue ipz_squeue;
111 struct ipz_queue ipz_rqueue;
112 struct h_galpas galpas;
113 u32 qkey;
114 u32 real_qp_num;
115 u32 token;
116 spinlock_t spinlock_s;
117 spinlock_t spinlock_r;
118 u32 sq_max_inline_data_size;
119 struct ipz_qp_handle ipz_qp_handle;
120 struct ehca_pfqp pf;
121 struct ib_qp_init_attr init_attr;
122 u64 uspace_squeue;
123 u64 uspace_rqueue;
124 u64 uspace_fwh;
125 struct ehca_cq *send_cq;
126 struct ehca_cq *recv_cq;
127 unsigned int sqerr_purgeflag;
128 struct hlist_node list_entries;
129};
130
131/* must be power of 2 */
132#define QP_HASHTAB_LEN 8
133
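/*
 * Editor's note (not part of the original header): QP_HASHTAB_LEN must
 * stay a power of two because ehca_cq.c reduces a QP number to a bucket
 * index with a mask rather than a modulo:
 *
 *	key = real_qp_num & (QP_HASHTAB_LEN - 1);	equivalent to % 8
 *
 * A non-power-of-two length would silently break that hashing.
 */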
134struct ehca_cq {
135 struct ib_cq ib_cq;
136 struct ipz_queue ipz_queue;
137 struct h_galpas galpas;
138 spinlock_t spinlock;
139 u32 cq_number;
140 u32 token;
141 u32 nr_of_entries;
142 struct ipz_cq_handle ipz_cq_handle;
143 struct ehca_pfcq pf;
144 spinlock_t cb_lock;
145 u64 uspace_queue;
146 u64 uspace_fwh;
147 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
148 struct list_head entry;
149 u32 nr_callbacks;
150 spinlock_t task_lock;
151 u32 ownpid;
152};
153
154enum ehca_mr_flag {
155 EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
156 EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
157};
158
159struct ehca_mr {
160 union {
161 struct ib_mr ib_mr; /* must always be first in ehca_mr */
162 struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
163 } ib;
164 spinlock_t mrlock;
165
166 enum ehca_mr_flag flags;
167 u32 num_pages; /* number of MR pages */
168 u32 num_4k; /* number of 4k "page" portions to form MR */
169 int acl; /* ACL (stored here for usage in reregister) */
170 u64 *start; /* virtual start address (stored here for */
171 /* usage in reregister) */
172 u64 size; /* size (stored here for usage in reregister) */
173 u32 fmr_page_size; /* page size for FMR */
174 u32 fmr_max_pages; /* max pages for FMR */
175 u32 fmr_max_maps; /* max outstanding maps for FMR */
176 u32 fmr_map_cnt; /* map counter for FMR */
177 /* fw specific data */
178 struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
179 struct h_galpas galpas;
180 /* data for userspace bridge */
181 u32 nr_of_pages;
182 void *pagearray;
183};
184
185struct ehca_mw {
186 struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
187 spinlock_t mwlock;
188
189 u8 never_bound; /* indication MW was never bound */
190 struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
191 struct h_galpas galpas;
192};
193
194enum ehca_mr_pgi_type {
195 EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
196 * ehca_rereg_phys_mr,
197 * ehca_reg_internal_maxmr */
198 EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
199 EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
200};
201
202struct ehca_mr_pginfo {
203 enum ehca_mr_pgi_type type;
204 u64 num_pages;
205 u64 page_cnt;
206 u64 num_4k; /* number of 4k "page" portions */
207 u64 page_4k_cnt; /* counter for 4k "page" portions */
208 u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */
209
210 /* type EHCA_MR_PGI_PHYS section */
211 int num_phys_buf;
212 struct ib_phys_buf *phys_buf_array;
213 u64 next_buf;
214
215 /* type EHCA_MR_PGI_USER section */
216 struct ib_umem *region;
217 struct ib_umem_chunk *next_chunk;
218 u64 next_nmap;
219
220 /* type EHCA_MR_PGI_FMR section */
221 u64 *page_list;
222 u64 next_listelem;
223 /* next_4k also used within EHCA_MR_PGI_FMR */
224};
225
226/* output parameters for MR/FMR hipz calls */
227struct ehca_mr_hipzout_parms {
228 struct ipz_mrmw_handle handle;
229 u32 lkey;
230 u32 rkey;
231 u64 len;
232 u64 vaddr;
233 u32 acl;
234};
235
236/* output parameters for MW hipz calls */
237struct ehca_mw_hipzout_parms {
238 struct ipz_mrmw_handle handle;
239 u32 rkey;
240};
241
242struct ehca_av {
243 struct ib_ah ib_ah;
244 struct ehca_ud_av av;
245};
246
247struct ehca_ucontext {
248 struct ib_ucontext ib_ucontext;
249};
250
251struct ehca_module *ehca_module_new(void);
252
253int ehca_module_delete(struct ehca_module *me);
254
255int ehca_eq_ctor(struct ehca_eq *eq);
256
257int ehca_eq_dtor(struct ehca_eq *eq);
258
259struct ehca_shca *ehca_shca_new(void);
260
261int ehca_shca_delete(struct ehca_shca *me);
262
263struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
264
265int ehca_init_pd_cache(void);
266void ehca_cleanup_pd_cache(void);
267int ehca_init_cq_cache(void);
268void ehca_cleanup_cq_cache(void);
269int ehca_init_qp_cache(void);
270void ehca_cleanup_qp_cache(void);
271int ehca_init_av_cache(void);
272void ehca_cleanup_av_cache(void);
273int ehca_init_mrmw_cache(void);
274void ehca_cleanup_mrmw_cache(void);
275
276extern spinlock_t ehca_qp_idr_lock;
277extern spinlock_t ehca_cq_idr_lock;
278extern struct idr ehca_qp_idr;
279extern struct idr ehca_cq_idr;
280
281extern int ehca_static_rate;
282extern int ehca_port_act_time;
283extern int ehca_use_hp_mr;
284
285struct ipzu_queue_resp {
286 u64 queue; /* points to first queue entry */
287 u32 qe_size; /* queue entry size */
288 u32 act_nr_of_sg;
289 u32 queue_length; /* queue length allocated in bytes */
290 u32 pagesize;
291 u32 toggle_state;
292 u32 dummy; /* padding for 8 byte alignment */
293};
294
295struct ehca_create_cq_resp {
296 u32 cq_number;
297 u32 token;
298 struct ipzu_queue_resp ipz_queue;
299 struct h_galpas galpas;
300};
301
302struct ehca_create_qp_resp {
303 u32 qp_num;
304 u32 token;
305 u32 qp_type;
306 u32 qkey;
307 /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
308 u32 real_qp_num;
309 u32 dummy; /* padding for 8 byte alignment */
310 struct ipzu_queue_resp ipz_squeue;
311 struct ipzu_queue_resp ipz_rqueue;
312 struct h_galpas galpas;
313};
314
315struct ehca_alloc_cq_parms {
316 u32 nr_cqe;
317 u32 act_nr_of_entries;
318 u32 act_pages;
319 struct ipz_eq_handle eq_handle;
320};
321
322struct ehca_alloc_qp_parms {
323 int servicetype;
324 int sigtype;
325 int daqp_ctrl;
326 int max_send_sge;
327 int max_recv_sge;
328 int ud_av_l_key_ctl;
329
330 u16 act_nr_send_wqes;
331 u16 act_nr_recv_wqes;
332 u8 act_nr_recv_sges;
333 u8 act_nr_send_sges;
334
335 u32 nr_rq_pages;
336 u32 nr_sq_pages;
337
338 struct ipz_eq_handle ipz_eq_handle;
339 struct ipz_pd pd;
340};
341
342int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
343int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
344struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
345
346#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
new file mode 100644
index 000000000000..5665f213b81a
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -0,0 +1,236 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * pSeries interface definitions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_CLASSES_PSERIES_H__
43#define __EHCA_CLASSES_PSERIES_H__
44
45#include "hcp_phyp.h"
46#include "ipz_pt_fn.h"
47
48
49struct ehca_pfqp {
50 struct ipz_qpt sqpt;
51 struct ipz_qpt rqpt;
52};
53
54struct ehca_pfcq {
55 struct ipz_qpt qpt;
56 u32 cqnr;
57};
58
59struct ehca_pfeq {
60 struct ipz_qpt qpt;
61 struct h_galpa galpa;
62 u32 eqnr;
63};
64
65struct ipz_adapter_handle {
66 u64 handle;
67};
68
69struct ipz_cq_handle {
70 u64 handle;
71};
72
73struct ipz_eq_handle {
74 u64 handle;
75};
76
77struct ipz_qp_handle {
78 u64 handle;
79};
80struct ipz_mrmw_handle {
81 u64 handle;
82};
83
84struct ipz_pd {
85 u32 value;
86};
87
88struct hcp_modify_qp_control_block {
89 u32 qkey; /* 00 */
90 u32 rdd; /* reliable datagram domain */
91 u32 send_psn; /* 02 */
92 u32 receive_psn; /* 03 */
93 u32 prim_phys_port; /* 04 */
94 u32 alt_phys_port; /* 05 */
95 u32 prim_p_key_idx; /* 06 */
96 u32 alt_p_key_idx; /* 07 */
97 u32 rdma_atomic_ctrl; /* 08 */
98 u32 qp_state; /* 09 */
99 u32 reserved_10; /* 10 */
100 u32 rdma_nr_atomic_resp_res; /* 11 */
101 u32 path_migration_state; /* 12 */
102 u32 rdma_atomic_outst_dest_qp; /* 13 */
103 u32 dest_qp_nr; /* 14 */
104 u32 min_rnr_nak_timer_field; /* 15 */
105 u32 service_level; /* 16 */
106 u32 send_grh_flag; /* 17 */
107 u32 retry_count; /* 18 */
108 u32 timeout; /* 19 */
109 u32 path_mtu; /* 20 */
110 u32 max_static_rate; /* 21 */
111 u32 dlid; /* 22 */
112 u32 rnr_retry_count; /* 23 */
113 u32 source_path_bits; /* 24 */
114 u32 traffic_class; /* 25 */
115 u32 hop_limit; /* 26 */
116 u32 source_gid_idx; /* 27 */
117 u32 flow_label; /* 28 */
118 u32 reserved_29; /* 29 */
119 union { /* 30 */
120 u64 dw[2];
121 u8 byte[16];
122 } dest_gid;
123 u32 service_level_al; /* 34 */
124 u32 send_grh_flag_al; /* 35 */
125 u32 retry_count_al; /* 36 */
126 u32 timeout_al; /* 37 */
127 u32 max_static_rate_al; /* 38 */
128 u32 dlid_al; /* 39 */
129 u32 rnr_retry_count_al; /* 40 */
130 u32 source_path_bits_al; /* 41 */
131 u32 traffic_class_al; /* 42 */
132 u32 hop_limit_al; /* 43 */
133 u32 source_gid_idx_al; /* 44 */
134 u32 flow_label_al; /* 45 */
135 u32 reserved_46; /* 46 */
136 u32 reserved_47; /* 47 */
137 union { /* 48 */
138 u64 dw[2];
139 u8 byte[16];
140 } dest_gid_al;
141 u32 max_nr_outst_send_wr; /* 52 */
142 u32 max_nr_outst_recv_wr; /* 53 */
143 u32 disable_ete_credit_check; /* 54 */
144 u32 qp_number; /* 55 */
145 u64 send_queue_handle; /* 56 */
146 u64 recv_queue_handle; /* 58 */
147 u32 actual_nr_sges_in_sq_wqe; /* 60 */
148 u32 actual_nr_sges_in_rq_wqe; /* 61 */
149 u32 qp_enable; /* 62 */
150 u32 curr_srq_limit; /* 63 */
151 u64 qp_aff_asyn_ev_log_reg; /* 64 */
152 u64 shared_rq_hndl; /* 66 */
153 u64 trigg_doorbell_qp_hndl; /* 68 */
154 u32 reserved_70_127[58]; /* 70 */
155};
156
157#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0)
158#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2)
159#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3)
160#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4)
161#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31)
162#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5)
163#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6)
164#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31)
165#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7)
166#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8)
167#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9)
168#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31)
169#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11)
170#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12)
171#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13)
172#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14)
173#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15)
174#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16)
175#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17)
176#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18)
177#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19)
178#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20)
179#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31)
180#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21)
181#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31)
182#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22)
183#define MQPCB_DLID EHCA_BMASK_IBM(16,31)
184#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23)
185#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31)
186#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24)
187#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31)
188#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25)
189#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31)
190#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26)
191#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31)
192#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27)
193#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31)
194#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28)
195#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31)
196#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30)
197#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31)
198#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31)
199#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32)
200#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31)
201#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33)
202#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
203#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34)
204#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31)
205#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35)
206#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31)
207#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36)
208#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31)
209#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37)
210#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31)
211#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38)
212#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31)
213#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39)
214#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31)
215#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40)
216#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31)
217#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41)
218#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31)
219#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42)
220#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31)
221#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44)
222#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45)
223#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31)
224#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46)
225#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31)
226#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47)
227#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31)
228#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31)
229#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48)
230#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31)
231#define MQPCB_MASK_CURR_SQR_LIMIT EHCA_BMASK_IBM(49,49)
232#define MQPCB_CURR_SQR_LIMIT EHCA_BMASK_IBM(15,31)
233#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
234#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51)
235
236#endif /* __EHCA_CLASSES_PSERIES_H__ */
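The MQPCB_MASK_* definitions above are single selector bits telling the firmware which control-block fields to apply, while the unprefixed MQPCB_* masks describe where a value sits inside its 32-bit word. A brief sketch of how the selector bits are combined (editor's illustration; EHCA_BMASK_SET is the same helper used in ehca_av.c above, and the surrounding modify-QP call is not shown):

	/* apply only the qp_state and qkey fields of the control block */
	u64 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);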
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
new file mode 100644
index 000000000000..458fe19648a1
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -0,0 +1,427 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Completion queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
11 *
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include <asm/current.h>
47
48#include "ehca_iverbs.h"
49#include "ehca_classes.h"
50#include "ehca_irq.h"
51#include "hcp_if.h"
52
53static struct kmem_cache *cq_cache;
54
55int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
56{
57 unsigned int qp_num = qp->real_qp_num;
58 unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
59 unsigned long spl_flags;
60
61 spin_lock_irqsave(&cq->spinlock, spl_flags);
62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
63 spin_unlock_irqrestore(&cq->spinlock, spl_flags);
64
65 ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
66 cq->cq_number, qp_num);
67
68 return 0;
69}
70
71int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
72{
73 int ret = -EINVAL;
74 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
75 struct hlist_node *iter;
76 struct ehca_qp *qp;
77 unsigned long spl_flags;
78
79 spin_lock_irqsave(&cq->spinlock, spl_flags);
80 hlist_for_each(iter, &cq->qp_hashtab[key]) {
81 qp = hlist_entry(iter, struct ehca_qp, list_entries);
82 if (qp->real_qp_num == real_qp_num) {
83 hlist_del(iter);
84 ehca_dbg(cq->ib_cq.device,
85 "removed qp from cq .cq_num=%x real_qp_num=%x",
86 cq->cq_number, real_qp_num);
87 ret = 0;
88 break;
89 }
90 }
91 spin_unlock_irqrestore(&cq->spinlock, spl_flags);
92 if (ret)
93 ehca_err(cq->ib_cq.device,
94 "qp not found cq_num=%x real_qp_num=%x",
95 cq->cq_number, real_qp_num);
96
97 return ret;
98}
99
100struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
101{
102 struct ehca_qp *ret = NULL;
103 unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
104 struct hlist_node *iter;
105 struct ehca_qp *qp;
106 hlist_for_each(iter, &cq->qp_hashtab[key]) {
107 qp = hlist_entry(iter, struct ehca_qp, list_entries);
108 if (qp->real_qp_num == real_qp_num) {
109 ret = qp;
110 break;
111 }
112 }
113 return ret;
114}
115
116struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
117 struct ib_ucontext *context,
118 struct ib_udata *udata)
119{
120 static const u32 additional_cqe = 20;
121 struct ib_cq *cq;
122 struct ehca_cq *my_cq;
123 struct ehca_shca *shca =
124 container_of(device, struct ehca_shca, ib_device);
125 struct ipz_adapter_handle adapter_handle;
126 struct ehca_alloc_cq_parms param; /* h_call's out parameters */
127 struct h_galpa gal;
128 void *vpage;
129 u32 counter;
130 u64 rpage, cqx_fec, h_ret;
131 int ipz_rc, ret, i;
132 unsigned long flags;
133
134 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
135 return ERR_PTR(-EINVAL);
136
137 my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
138 if (!my_cq) {
139 ehca_err(device, "Out of memory for ehca_cq struct device=%p",
140 device);
141 return ERR_PTR(-ENOMEM);
142 }
143
144 memset(my_cq, 0, sizeof(struct ehca_cq));
145 memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
146
147 spin_lock_init(&my_cq->spinlock);
148 spin_lock_init(&my_cq->cb_lock);
149 spin_lock_init(&my_cq->task_lock);
150 my_cq->ownpid = current->tgid;
151
152 cq = &my_cq->ib_cq;
153
154 adapter_handle = shca->ipz_hca_handle;
155 param.eq_handle = shca->eq.ipz_eq_handle;
156
157 do {
158 if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
159 cq = ERR_PTR(-ENOMEM);
160 ehca_err(device, "Can't reserve idr nr. device=%p",
161 device);
162 goto create_cq_exit1;
163 }
164
165 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
166 ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
167 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
168
169 } while (ret == -EAGAIN);
170
171 if (ret) {
172 cq = ERR_PTR(-ENOMEM);
173 ehca_err(device, "Can't allocate new idr entry. device=%p",
174 device);
175 goto create_cq_exit1;
176 }
177
178 /*
 179	 * A CQ's maximum depth is 4GB-64, but we need an additional 20 entries
 180	 * as a buffer for receiving error CQEs.
181 */
182 param.nr_cqe = cqe + additional_cqe;
183 h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
184
185 if (h_ret != H_SUCCESS) {
186 ehca_err(device, "hipz_h_alloc_resource_cq() failed "
187 "h_ret=%lx device=%p", h_ret, device);
188 cq = ERR_PTR(ehca2ib_return_code(h_ret));
189 goto create_cq_exit2;
190 }
191
192 ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages,
193 EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0);
194 if (!ipz_rc) {
195 ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p",
196 ipz_rc, device);
197 cq = ERR_PTR(-EINVAL);
198 goto create_cq_exit3;
199 }
200
201 for (counter = 0; counter < param.act_pages; counter++) {
202 vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
203 if (!vpage) {
204 ehca_err(device, "ipz_qpageit_get_inc() "
205 "returns NULL device=%p", device);
206 cq = ERR_PTR(-EAGAIN);
207 goto create_cq_exit4;
208 }
209 rpage = virt_to_abs(vpage);
210
211 h_ret = hipz_h_register_rpage_cq(adapter_handle,
212 my_cq->ipz_cq_handle,
213 &my_cq->pf,
214 0,
215 0,
216 rpage,
217 1,
218 my_cq->galpas.
219 kernel);
220
221 if (h_ret < H_SUCCESS) {
222 ehca_err(device, "hipz_h_register_rpage_cq() failed "
223 "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i "
224 "act_pages=%i", my_cq, my_cq->cq_number,
225 h_ret, counter, param.act_pages);
226 cq = ERR_PTR(-EINVAL);
227 goto create_cq_exit4;
228 }
229
230 if (counter == (param.act_pages - 1)) {
231 vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
232 if ((h_ret != H_SUCCESS) || vpage) {
233 ehca_err(device, "Registration of pages not "
234 "complete ehca_cq=%p cq_num=%x "
235 "h_ret=%lx", my_cq, my_cq->cq_number,
236 h_ret);
237 cq = ERR_PTR(-EAGAIN);
238 goto create_cq_exit4;
239 }
240 } else {
241 if (h_ret != H_PAGE_REGISTERED) {
242 ehca_err(device, "Registration of page failed "
243 "ehca_cq=%p cq_num=%x h_ret=%lx"
244 "counter=%i act_pages=%i",
245 my_cq, my_cq->cq_number,
246 h_ret, counter, param.act_pages);
247 cq = ERR_PTR(-ENOMEM);
248 goto create_cq_exit4;
249 }
250 }
251 }
252
253 ipz_qeit_reset(&my_cq->ipz_queue);
254
255 gal = my_cq->galpas.kernel;
256 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
257 ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
258 my_cq, my_cq->cq_number, cqx_fec);
259
260 my_cq->ib_cq.cqe = my_cq->nr_of_entries =
261 param.act_nr_of_entries - additional_cqe;
262 my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
263
264 for (i = 0; i < QP_HASHTAB_LEN; i++)
265 INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
266
267 if (context) {
268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
269 struct ehca_create_cq_resp resp;
270 struct vm_area_struct *vma;
271 memset(&resp, 0, sizeof(resp));
272 resp.cq_number = my_cq->cq_number;
273 resp.token = my_cq->token;
274 resp.ipz_queue.qe_size = ipz_queue->qe_size;
275 resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
276 resp.ipz_queue.queue_length = ipz_queue->queue_length;
277 resp.ipz_queue.pagesize = ipz_queue->pagesize;
278 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
279 ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
280 ipz_queue->queue_length,
281 (void**)&resp.ipz_queue.queue,
282 &vma);
283 if (ret) {
284 ehca_err(device, "Could not mmap queue pages");
285 cq = ERR_PTR(ret);
286 goto create_cq_exit4;
287 }
288 my_cq->uspace_queue = resp.ipz_queue.queue;
289 resp.galpas = my_cq->galpas;
290 ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
291 (void**)&resp.galpas.kernel.fw_handle,
292 &vma);
293 if (ret) {
294 ehca_err(device, "Could not mmap fw_handle");
295 cq = ERR_PTR(ret);
296 goto create_cq_exit5;
297 }
298 my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
299 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
300 ehca_err(device, "Copy to udata failed.");
301 goto create_cq_exit6;
302 }
303 }
304
305 return cq;
306
307create_cq_exit6:
308 ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
309
310create_cq_exit5:
311 ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
312
313create_cq_exit4:
314 ipz_queue_dtor(&my_cq->ipz_queue);
315
316create_cq_exit3:
317 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
318 if (h_ret != H_SUCCESS)
319 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
320 "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret);
321
322create_cq_exit2:
323 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
324 idr_remove(&ehca_cq_idr, my_cq->token);
325 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
326
327create_cq_exit1:
328 kmem_cache_free(cq_cache, my_cq);
329
330 return cq;
331}
332
333int ehca_destroy_cq(struct ib_cq *cq)
334{
335 u64 h_ret;
336 int ret;
337 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
338 int cq_num = my_cq->cq_number;
339 struct ib_device *device = cq->device;
340 struct ehca_shca *shca = container_of(device, struct ehca_shca,
341 ib_device);
342 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
343 u32 cur_pid = current->tgid;
344 unsigned long flags;
345
346 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
347 while (my_cq->nr_callbacks)
348 yield();
349
350 idr_remove(&ehca_cq_idr, my_cq->token);
351 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
352
353 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
354 ehca_err(device, "Invalid caller pid=%x ownpid=%x",
355 cur_pid, my_cq->ownpid);
356 return -EINVAL;
357 }
358
359 /* un-mmap if vma alloc */
 360	if (my_cq->uspace_queue) {
361 ret = ehca_munmap(my_cq->uspace_queue,
362 my_cq->ipz_queue.queue_length);
363 if (ret)
364 ehca_err(device, "Could not munmap queue ehca_cq=%p "
365 "cq_num=%x", my_cq, cq_num);
366 ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
367 if (ret)
368 ehca_err(device, "Could not munmap fwh ehca_cq=%p "
369 "cq_num=%x", my_cq, cq_num);
370 }
371
372 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
373 if (h_ret == H_R_STATE) {
374 /* cq in err: read err data and destroy it forcibly */
 375		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%lx in err "
376 "state. Try to delete it forcibly.",
377 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
378 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
379 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
380 if (h_ret == H_SUCCESS)
381 ehca_dbg(device, "cq_num=%x deleted successfully.",
382 cq_num);
383 }
384 if (h_ret != H_SUCCESS) {
385 ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx "
386 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
387 return ehca2ib_return_code(h_ret);
388 }
389 ipz_queue_dtor(&my_cq->ipz_queue);
390 kmem_cache_free(cq_cache, my_cq);
391
392 return 0;
393}
394
395int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
396{
397 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
398 u32 cur_pid = current->tgid;
399
400 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
401 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
402 cur_pid, my_cq->ownpid);
403 return -EINVAL;
404 }
405
406 /* TODO: proper resize needs to be done */
407 ehca_err(cq->device, "not implemented yet");
408
409 return -EFAULT;
410}
411
412int ehca_init_cq_cache(void)
413{
414 cq_cache = kmem_cache_create("ehca_cache_cq",
415 sizeof(struct ehca_cq), 0,
416 SLAB_HWCACHE_ALIGN,
417 NULL, NULL);
418 if (!cq_cache)
419 return -ENOMEM;
420 return 0;
421}
422
423void ehca_cleanup_cq_cache(void)
424{
425 if (cq_cache)
426 kmem_cache_destroy(cq_cache);
427}
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c
new file mode 100644
index 000000000000..5281dec66f12
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -0,0 +1,185 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Event queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
11 *
12 *
13 * Copyright (c) 2005 IBM Corporation
14 *
15 * All rights reserved.
16 *
17 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
18 * BSD.
19 *
20 * OpenIB BSD License
21 *
22 * Redistribution and use in source and binary forms, with or without
23 * modification, are permitted provided that the following conditions are met:
24 *
25 * Redistributions of source code must retain the above copyright notice, this
26 * list of conditions and the following disclaimer.
27 *
28 * Redistributions in binary form must reproduce the above copyright notice,
29 * this list of conditions and the following disclaimer in the documentation
30 * and/or other materials
31 * provided with the distribution.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
34 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
37 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
38 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
39 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
40 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
41 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
43 * POSSIBILITY OF SUCH DAMAGE.
44 */
45
46#include "ehca_classes.h"
47#include "ehca_irq.h"
48#include "ehca_iverbs.h"
49#include "ehca_qes.h"
50#include "hcp_if.h"
51#include "ipz_pt_fn.h"
52
53int ehca_create_eq(struct ehca_shca *shca,
54 struct ehca_eq *eq,
55 const enum ehca_eq_type type, const u32 length)
56{
57 u64 ret;
58 u32 nr_pages;
59 u32 i;
60 void *vpage;
61 struct ib_device *ib_dev = &shca->ib_device;
62
63 spin_lock_init(&eq->spinlock);
64 eq->is_initialized = 0;
65
66 if (type != EHCA_EQ && type != EHCA_NEQ) {
67 ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
68 return -EINVAL;
69 }
70 if (!length) {
71 ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
72 return -EINVAL;
73 }
74
75 ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
76 &eq->pf,
77 type,
78 length,
79 &eq->ipz_eq_handle,
80 &eq->length,
81 &nr_pages, &eq->ist);
82
83 if (ret != H_SUCCESS) {
84 ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
85 return -EINVAL;
86 }
87
88 ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages,
89 EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0);
90 if (!ret) {
91 ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
92 goto create_eq_exit1;
93 }
94
95 for (i = 0; i < nr_pages; i++) {
96 u64 rpage;
97
98 if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) {
99 ret = H_RESOURCE;
100 goto create_eq_exit2;
101 }
102
103 rpage = virt_to_abs(vpage);
104 ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
105 eq->ipz_eq_handle,
106 &eq->pf,
107 0, 0, rpage, 1);
108
109 if (i == (nr_pages - 1)) {
110 /* last page */
111 vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
112 if (ret != H_SUCCESS || vpage)
113 goto create_eq_exit2;
114 } else {
115 if (ret != H_PAGE_REGISTERED || !vpage)
116 goto create_eq_exit2;
117 }
118 }
119
120 ipz_qeit_reset(&eq->ipz_queue);
121
122 /* register interrupt handlers and initialize work queues */
123 if (type == EHCA_EQ) {
124 ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq,
125 SA_INTERRUPT, "ehca_eq",
126 (void *)shca);
127 if (ret < 0)
128 ehca_err(ib_dev, "Can't map interrupt handler.");
129
130 tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
131 } else if (type == EHCA_NEQ) {
132 ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq,
133 SA_INTERRUPT, "ehca_neq",
134 (void *)shca);
135 if (ret < 0)
136 ehca_err(ib_dev, "Can't map interrupt handler.");
137
138 tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
139 }
140
141 eq->is_initialized = 1;
142
143 return 0;
144
145create_eq_exit2:
146 ipz_queue_dtor(&eq->ipz_queue);
147
148create_eq_exit1:
149 hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
150
151 return -EINVAL;
152}
153
154void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
155{
156 unsigned long flags;
157 void *eqe;
158
159 spin_lock_irqsave(&eq->spinlock, flags);
160 eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
161 spin_unlock_irqrestore(&eq->spinlock, flags);
162
163 return eqe;
164}
165
166int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
167{
168 unsigned long flags;
169 u64 h_ret;
170
171 spin_lock_irqsave(&eq->spinlock, flags);
172 ibmebus_free_irq(NULL, eq->ist, (void *)shca);
173
174 h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
175
176 spin_unlock_irqrestore(&eq->spinlock, flags);
177
178 if (h_ret != H_SUCCESS) {
179 ehca_err(&shca->ib_device, "Can't free EQ resources.");
180 return -EINVAL;
181 }
182 ipz_queue_dtor(&eq->ipz_queue);
183
184 return 0;
185}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
new file mode 100644
index 000000000000..5eae6ac48425
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -0,0 +1,241 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HCA query functions
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "ehca_tools.h"
43#include "hcp_if.h"
44
45int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
46{
47 int ret = 0;
48 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
49 ib_device);
50 struct hipz_query_hca *rblock;
51
52 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
53 if (!rblock) {
54 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
55 return -ENOMEM;
56 }
57
58 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
59 ehca_err(&shca->ib_device, "Can't query device properties");
60 ret = -EINVAL;
61 goto query_device1;
62 }
63
64 memset(props, 0, sizeof(struct ib_device_attr));
65 props->fw_ver = rblock->hw_ver;
66 props->max_mr_size = rblock->max_mr_size;
67 props->vendor_id = rblock->vendor_id >> 8;
68 props->vendor_part_id = rblock->vendor_part_id >> 16;
69 props->hw_ver = rblock->hw_ver;
70 props->max_qp = min_t(int, rblock->max_qp, INT_MAX);
71 props->max_qp_wr = min_t(int, rblock->max_wqes_wq, INT_MAX);
72 props->max_sge = min_t(int, rblock->max_sge, INT_MAX);
73 props->max_sge_rd = min_t(int, rblock->max_sge_rd, INT_MAX);
74 props->max_cq = min_t(int, rblock->max_cq, INT_MAX);
75 props->max_cqe = min_t(int, rblock->max_cqe, INT_MAX);
76 props->max_mr = min_t(int, rblock->max_mr, INT_MAX);
77 props->max_mw = min_t(int, rblock->max_mw, INT_MAX);
78 props->max_pd = min_t(int, rblock->max_pd, INT_MAX);
79 props->max_ah = min_t(int, rblock->max_ah, INT_MAX);
80 props->max_fmr = min_t(int, rblock->max_mr, INT_MAX);
81 props->max_srq = 0;
82 props->max_srq_wr = 0;
83 props->max_srq_sge = 0;
84 props->max_pkeys = 16;
85 props->local_ca_ack_delay
86 = rblock->local_ca_ack_delay;
87 props->max_raw_ipv6_qp
88 = min_t(int, rblock->max_raw_ipv6_qp, INT_MAX);
89 props->max_raw_ethy_qp
90 = min_t(int, rblock->max_raw_ethy_qp, INT_MAX);
91 props->max_mcast_grp
92 = min_t(int, rblock->max_mcast_grp, INT_MAX);
93 props->max_mcast_qp_attach
94 = min_t(int, rblock->max_mcast_qp_attach, INT_MAX);
95 props->max_total_mcast_qp_attach
96 = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
97
98query_device1:
99 kfree(rblock);
100
101 return ret;
102}
103
104int ehca_query_port(struct ib_device *ibdev,
105 u8 port, struct ib_port_attr *props)
106{
107 int ret = 0;
108 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
109 ib_device);
110 struct hipz_query_port *rblock;
111
112 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
113 if (!rblock) {
114 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
115 return -ENOMEM;
116 }
117
118 if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
119 ehca_err(&shca->ib_device, "Can't query port properties");
120 ret = -EINVAL;
121 goto query_port1;
122 }
123
124 memset(props, 0, sizeof(struct ib_port_attr));
125 props->state = rblock->state;
126
127 switch (rblock->max_mtu) {
128 case 0x1:
129 props->active_mtu = props->max_mtu = IB_MTU_256;
130 break;
131 case 0x2:
132 props->active_mtu = props->max_mtu = IB_MTU_512;
133 break;
134 case 0x3:
135 props->active_mtu = props->max_mtu = IB_MTU_1024;
136 break;
137 case 0x4:
138 props->active_mtu = props->max_mtu = IB_MTU_2048;
139 break;
140 case 0x5:
141 props->active_mtu = props->max_mtu = IB_MTU_4096;
142 break;
143 default:
144 ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
145 rblock->max_mtu);
146 break;
147 }
148
149 props->gid_tbl_len = rblock->gid_tbl_len;
150 props->max_msg_sz = rblock->max_msg_sz;
151 props->bad_pkey_cntr = rblock->bad_pkey_cntr;
152 props->qkey_viol_cntr = rblock->qkey_viol_cntr;
153 props->pkey_tbl_len = rblock->pkey_tbl_len;
154 props->lid = rblock->lid;
155 props->sm_lid = rblock->sm_lid;
156 props->lmc = rblock->lmc;
157 props->sm_sl = rblock->sm_sl;
158 props->subnet_timeout = rblock->subnet_timeout;
159 props->init_type_reply = rblock->init_type_reply;
160
161 props->active_width = IB_WIDTH_12X;
162 props->active_speed = 0x1;
163
164query_port1:
165 kfree(rblock);
166
167 return ret;
168}
169
170int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
171{
172 int ret = 0;
173 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
174 struct hipz_query_port *rblock;
175
 176	if (index > 15) {
177 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
178 return -EINVAL;
179 }
180
181 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
182 if (!rblock) {
183 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
184 return -ENOMEM;
185 }
186
187 if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
188 ehca_err(&shca->ib_device, "Can't query port properties");
189 ret = -EINVAL;
190 goto query_pkey1;
191 }
192
 193	memcpy(pkey, &rblock->pkey_entries[index], sizeof(u16));
194
195query_pkey1:
196 kfree(rblock);
197
198 return ret;
199}
200
201int ehca_query_gid(struct ib_device *ibdev, u8 port,
202 int index, union ib_gid *gid)
203{
204 int ret = 0;
205 struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
206 ib_device);
207 struct hipz_query_port *rblock;
208
209 if (index > 255) {
210 ehca_err(&shca->ib_device, "Invalid index: %x.", index);
211 return -EINVAL;
212 }
213
214 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
215 if (!rblock) {
216 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
217 return -ENOMEM;
218 }
219
220 if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
221 ehca_err(&shca->ib_device, "Can't query port properties");
222 ret = -EINVAL;
223 goto query_gid1;
224 }
225
226 memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
227 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
228
229query_gid1:
230 kfree(rblock);
231
232 return ret;
233}
234
235int ehca_modify_port(struct ib_device *ibdev,
236 u8 port, int port_modify_mask,
237 struct ib_port_modify *props)
238{
239 /* Not implemented yet */
240 return -EFAULT;
241}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
new file mode 100644
index 000000000000..2a65b5be1979
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -0,0 +1,762 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Functions for EQs, NEQs and interrupts
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "ehca_classes.h"
43#include "ehca_irq.h"
44#include "ehca_iverbs.h"
45#include "ehca_tools.h"
46#include "hcp_if.h"
47#include "hipz_fns.h"
48
49#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
50#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
51#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7)
52#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31)
53#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31)
54#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63)
55#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63)
56
57#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
58#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7)
59#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15)
60#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16)
61
62#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63)
63#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7)
64
65#ifdef CONFIG_INFINIBAND_EHCA_SCALING
66
67static void queue_comp_task(struct ehca_cq *__cq);
68
69static struct ehca_comp_pool* pool;
70static struct notifier_block comp_pool_callback_nb;
71
72#endif
73
74static inline void comp_event_callback(struct ehca_cq *cq)
75{
76 if (!cq->ib_cq.comp_handler)
77 return;
78
79 spin_lock(&cq->cb_lock);
80 cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
81 spin_unlock(&cq->cb_lock);
82
83 return;
84}
85
86static void print_error_data(struct ehca_shca * shca, void* data,
87 u64* rblock, int length)
88{
89 u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
90 u64 resource = rblock[1];
91
92 switch (type) {
93 case 0x1: /* Queue Pair */
94 {
95 struct ehca_qp *qp = (struct ehca_qp*)data;
96
97 /* only print error data if AER is set */
98 if (rblock[6] == 0)
99 return;
100
101 ehca_err(&shca->ib_device,
102 "QP 0x%x (resource=%lx) has errors.",
103 qp->ib_qp.qp_num, resource);
104 break;
105 }
106 case 0x4: /* Completion Queue */
107 {
108 struct ehca_cq *cq = (struct ehca_cq*)data;
109
110 ehca_err(&shca->ib_device,
111 "CQ 0x%x (resource=%lx) has errors.",
112 cq->cq_number, resource);
113 break;
114 }
115 default:
116 ehca_err(&shca->ib_device,
117 "Unknown error type: %lx on %s.",
118 type, shca->ib_device.name);
119 break;
120 }
121
122 ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
123 ehca_err(&shca->ib_device, "EHCA ----- error data begin "
124 "---------------------------------------------------");
125 ehca_dmp(rblock, length, "resource=%lx", resource);
126 ehca_err(&shca->ib_device, "EHCA ----- error data end "
127 "----------------------------------------------------");
128
129 return;
130}
131
132int ehca_error_data(struct ehca_shca *shca, void *data,
133 u64 resource)
134{
135
136 unsigned long ret;
137 u64 *rblock;
138 unsigned long block_count;
139
140 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
141 if (!rblock) {
142 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
143 ret = -ENOMEM;
144 goto error_data1;
145 }
146
147 ret = hipz_h_error_data(shca->ipz_hca_handle,
148 resource,
149 rblock,
150 &block_count);
151
152 if (ret == H_R_STATE) {
153 ehca_err(&shca->ib_device,
154 "No error data is available: %lx.", resource);
155 }
156 else if (ret == H_SUCCESS) {
157 int length;
158
159 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
160
161 if (length > PAGE_SIZE)
162 length = PAGE_SIZE;
163
164 print_error_data(shca, data, rblock, length);
165 }
166 else {
167 ehca_err(&shca->ib_device,
168 "Error data could not be fetched: %lx", resource);
169 }
170
171 kfree(rblock);
172
173error_data1:
174 return ret;
175
176}
177
178static void qp_event_callback(struct ehca_shca *shca,
179 u64 eqe,
180 enum ib_event_type event_type)
181{
182 struct ib_event event;
183 struct ehca_qp *qp;
184 unsigned long flags;
185 u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
186
187 spin_lock_irqsave(&ehca_qp_idr_lock, flags);
188 qp = idr_find(&ehca_qp_idr, token);
189 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
190
191
192 if (!qp)
193 return;
194
195 ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
196
197 if (!qp->ib_qp.event_handler)
198 return;
199
200 event.device = &shca->ib_device;
201 event.event = event_type;
202 event.element.qp = &qp->ib_qp;
203
204 qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
205
206 return;
207}
208
209static void cq_event_callback(struct ehca_shca *shca,
210 u64 eqe)
211{
212 struct ehca_cq *cq;
213 unsigned long flags;
214 u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
215
216 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
217 cq = idr_find(&ehca_cq_idr, token);
218 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
219
220 if (!cq)
221 return;
222
223 ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
224
225 return;
226}
227
228static void parse_identifier(struct ehca_shca *shca, u64 eqe)
229{
230 u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
231
232 switch (identifier) {
233 case 0x02: /* path migrated */
234 qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG);
235 break;
236 case 0x03: /* communication established */
237 qp_event_callback(shca, eqe, IB_EVENT_COMM_EST);
238 break;
239 case 0x04: /* send queue drained */
240 qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED);
241 break;
242 case 0x05: /* QP error */
243 case 0x06: /* QP error */
244 qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL);
245 break;
246 case 0x07: /* CQ error */
247 case 0x08: /* CQ error */
248 cq_event_callback(shca, eqe);
249 break;
250 case 0x09: /* MRMWPTE error */
251 ehca_err(&shca->ib_device, "MRMWPTE error.");
252 break;
253 case 0x0A: /* port event */
254 ehca_err(&shca->ib_device, "Port event.");
255 break;
256 case 0x0B: /* MR access error */
257 ehca_err(&shca->ib_device, "MR access error.");
258 break;
259 case 0x0C: /* EQ error */
260 ehca_err(&shca->ib_device, "EQ error.");
261 break;
262 case 0x0D: /* P/Q_Key mismatch */
263 ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
264 break;
265 case 0x10: /* sampling complete */
266 ehca_err(&shca->ib_device, "Sampling complete.");
267 break;
268 case 0x11: /* unaffiliated access error */
269 ehca_err(&shca->ib_device, "Unaffiliated access error.");
270 break;
271 case 0x12: /* path migrating error */
272 ehca_err(&shca->ib_device, "Path migration error.");
273 break;
274 case 0x13: /* interface trace stopped */
275 ehca_err(&shca->ib_device, "Interface trace stopped.");
276 break;
277 case 0x14: /* first error capture info available */
278 default:
279 ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
280 identifier, shca->ib_device.name);
281 break;
282 }
283
284 return;
285}
286
287static void parse_ec(struct ehca_shca *shca, u64 eqe)
288{
289 struct ib_event event;
290 u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
291 u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
292
293 switch (ec) {
294 case 0x30: /* port availability change */
295 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
296 ehca_info(&shca->ib_device,
297 "port %x is active.", port);
298 event.device = &shca->ib_device;
299 event.event = IB_EVENT_PORT_ACTIVE;
300 event.element.port_num = port;
301 shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
302 ib_dispatch_event(&event);
303 } else {
304 ehca_info(&shca->ib_device,
305 "port %x is inactive.", port);
306 event.device = &shca->ib_device;
307 event.event = IB_EVENT_PORT_ERR;
308 event.element.port_num = port;
309 shca->sport[port - 1].port_state = IB_PORT_DOWN;
310 ib_dispatch_event(&event);
311 }
312 break;
313 case 0x31:
314 /* port configuration change
315 * disruptive change is caused by
316 * LID, PKEY or SM change
317 */
318 ehca_warn(&shca->ib_device,
319 "disruptive port %x configuration change", port);
320
321 ehca_info(&shca->ib_device,
322 "port %x is inactive.", port);
323 event.device = &shca->ib_device;
324 event.event = IB_EVENT_PORT_ERR;
325 event.element.port_num = port;
326 shca->sport[port - 1].port_state = IB_PORT_DOWN;
327 ib_dispatch_event(&event);
328
329 ehca_info(&shca->ib_device,
330 "port %x is active.", port);
331 event.device = &shca->ib_device;
332 event.event = IB_EVENT_PORT_ACTIVE;
333 event.element.port_num = port;
334 shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
335 ib_dispatch_event(&event);
336 break;
337 case 0x32: /* adapter malfunction */
338 ehca_err(&shca->ib_device, "Adapter malfunction.");
339 break;
340 case 0x33: /* trace stopped */
341 ehca_err(&shca->ib_device, "Trace stopped.");
342 break;
343 default:
344 ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
345 ec, shca->ib_device.name);
346 break;
347 }
348
349 return;
350}
351
352static inline void reset_eq_pending(struct ehca_cq *cq)
353{
354 u64 CQx_EP;
355 struct h_galpa gal = cq->galpas.kernel;
356
357 hipz_galpa_store_cq(gal, cqx_ep, 0x0);
358 CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
359
360 return;
361}
362
363irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs)
364{
365 struct ehca_shca *shca = (struct ehca_shca*)dev_id;
366
367 tasklet_hi_schedule(&shca->neq.interrupt_task);
368
369 return IRQ_HANDLED;
370}
371
372void ehca_tasklet_neq(unsigned long data)
373{
374 struct ehca_shca *shca = (struct ehca_shca*)data;
375 struct ehca_eqe *eqe;
376 u64 ret;
377
378 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
379
380 while (eqe) {
381 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
382 parse_ec(shca, eqe->entry);
383
384 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
385 }
386
387 ret = hipz_h_reset_event(shca->ipz_hca_handle,
388 shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
389
390 if (ret != H_SUCCESS)
391 ehca_err(&shca->ib_device, "Can't clear notification events.");
392
393 return;
394}
395
396irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs)
397{
398 struct ehca_shca *shca = (struct ehca_shca*)dev_id;
399
400 tasklet_hi_schedule(&shca->eq.interrupt_task);
401
402 return IRQ_HANDLED;
403}
404
405void ehca_tasklet_eq(unsigned long data)
406{
407 struct ehca_shca *shca = (struct ehca_shca*)data;
408 struct ehca_eqe *eqe;
409 int int_state;
410 int query_cnt = 0;
411
412 do {
413 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
414
415 if ((shca->hw_level >= 2) && eqe)
416 int_state = 1;
417 else
418 int_state = 0;
419
420 while ((int_state == 1) || eqe) {
421 while (eqe) {
422 u64 eqe_value = eqe->entry;
423
424 ehca_dbg(&shca->ib_device,
425 "eqe_value=%lx", eqe_value);
426
427 /* TODO: better structure */
428 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
429 eqe_value)) {
430 unsigned long flags;
431 u32 token;
432 struct ehca_cq *cq;
433
434 ehca_dbg(&shca->ib_device,
435 "... completion event");
436 token =
437 EHCA_BMASK_GET(EQE_CQ_TOKEN,
438 eqe_value);
439 spin_lock_irqsave(&ehca_cq_idr_lock,
440 flags);
441 cq = idr_find(&ehca_cq_idr, token);
442
443 if (cq == NULL) {
444 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
445 break;
446 }
447
448 reset_eq_pending(cq);
449#ifdef CONFIG_INFINIBAND_EHCA_SCALING
450 queue_comp_task(cq);
451 spin_unlock_irqrestore(&ehca_cq_idr_lock,
452 flags);
453#else
454 spin_unlock_irqrestore(&ehca_cq_idr_lock,
455 flags);
456 comp_event_callback(cq);
457#endif
458 } else {
459 ehca_dbg(&shca->ib_device,
460 "... non completion event");
461 parse_identifier(shca, eqe_value);
462 }
463 eqe =
464 (struct ehca_eqe *)ehca_poll_eq(shca,
465 &shca->eq);
466 }
467
468 if (shca->hw_level >= 2) {
469 int_state =
470 hipz_h_query_int_state(shca->ipz_hca_handle,
471 shca->eq.ist);
472 query_cnt++;
473 iosync();
474 if (query_cnt >= 100) {
475 query_cnt = 0;
476 int_state = 0;
477 }
478 }
479 eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
480
481 }
482 } while (int_state != 0);
483
484 return;
485}
486
487#ifdef CONFIG_INFINIBAND_EHCA_SCALING
488
489static inline int find_next_online_cpu(struct ehca_comp_pool* pool)
490{
491 unsigned long flags_last_cpu;
492
493 if (ehca_debug_level)
494 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
495
496 spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu);
497 pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map);
498 if (pool->last_cpu == NR_CPUS)
499 pool->last_cpu = first_cpu(cpu_online_map);
500 spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu);
501
502 return pool->last_cpu;
503}
504
505static void __queue_comp_task(struct ehca_cq *__cq,
506 struct ehca_cpu_comp_task *cct)
507{
508 unsigned long flags_cct;
509 unsigned long flags_cq;
510
511 spin_lock_irqsave(&cct->task_lock, flags_cct);
512 spin_lock_irqsave(&__cq->task_lock, flags_cq);
513
514 if (__cq->nr_callbacks == 0) {
515 __cq->nr_callbacks++;
516 list_add_tail(&__cq->entry, &cct->cq_list);
517 cct->cq_jobs++;
518 wake_up(&cct->wait_queue);
519 }
520 else
521 __cq->nr_callbacks++;
522
523 spin_unlock_irqrestore(&__cq->task_lock, flags_cq);
524 spin_unlock_irqrestore(&cct->task_lock, flags_cct);
525}
526
527static void queue_comp_task(struct ehca_cq *__cq)
528{
529 int cpu;
530 int cpu_id;
531 struct ehca_cpu_comp_task *cct;
532
533 cpu = get_cpu();
534 cpu_id = find_next_online_cpu(pool);
535
536 BUG_ON(!cpu_online(cpu_id));
537
538 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
539
540 if (cct->cq_jobs > 0) {
541 cpu_id = find_next_online_cpu(pool);
542 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
543 }
544
545 __queue_comp_task(__cq, cct);
546
547 put_cpu();
548
549 return;
550}
551
552static void run_comp_task(struct ehca_cpu_comp_task* cct)
553{
554 struct ehca_cq *cq;
555 unsigned long flags_cct;
556 unsigned long flags_cq;
557
558 spin_lock_irqsave(&cct->task_lock, flags_cct);
559
560 while (!list_empty(&cct->cq_list)) {
561 cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
562 spin_unlock_irqrestore(&cct->task_lock, flags_cct);
563 comp_event_callback(cq);
564 spin_lock_irqsave(&cct->task_lock, flags_cct);
565
566 spin_lock_irqsave(&cq->task_lock, flags_cq);
567 cq->nr_callbacks--;
568 if (cq->nr_callbacks == 0) {
569 list_del_init(cct->cq_list.next);
570 cct->cq_jobs--;
571 }
572 spin_unlock_irqrestore(&cq->task_lock, flags_cq);
573
574 }
575
576 spin_unlock_irqrestore(&cct->task_lock, flags_cct);
577
578 return;
579}
580
581static int comp_task(void *__cct)
582{
583 struct ehca_cpu_comp_task* cct = __cct;
584 DECLARE_WAITQUEUE(wait, current);
585
586 set_current_state(TASK_INTERRUPTIBLE);
587 while (!kthread_should_stop()) {
588 add_wait_queue(&cct->wait_queue, &wait);
589
590 if (list_empty(&cct->cq_list))
591 schedule();
592 else
593 __set_current_state(TASK_RUNNING);
594
595 remove_wait_queue(&cct->wait_queue, &wait);
596
597 if (!list_empty(&cct->cq_list))
598 run_comp_task(__cct);
599
600 set_current_state(TASK_INTERRUPTIBLE);
601 }
602 __set_current_state(TASK_RUNNING);
603
604 return 0;
605}
606
607static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
608 int cpu)
609{
610 struct ehca_cpu_comp_task *cct;
611
612 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
613 spin_lock_init(&cct->task_lock);
614 INIT_LIST_HEAD(&cct->cq_list);
615 init_waitqueue_head(&cct->wait_queue);
616 cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
617
618 return cct->task;
619}
620
621static void destroy_comp_task(struct ehca_comp_pool *pool,
622 int cpu)
623{
624 struct ehca_cpu_comp_task *cct;
625 struct task_struct *task;
626 unsigned long flags_cct;
627
628 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
629
630 spin_lock_irqsave(&cct->task_lock, flags_cct);
631
632 task = cct->task;
633 cct->task = NULL;
634 cct->cq_jobs = 0;
635
636 spin_unlock_irqrestore(&cct->task_lock, flags_cct);
637
638 if (task)
639 kthread_stop(task);
640
641 return;
642}
643
644static void take_over_work(struct ehca_comp_pool *pool,
645 int cpu)
646{
647 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
648 LIST_HEAD(list);
649 struct ehca_cq *cq;
650 unsigned long flags_cct;
651
652 spin_lock_irqsave(&cct->task_lock, flags_cct);
653
654 list_splice_init(&cct->cq_list, &list);
655
656 while (!list_empty(&list)) {
657 cq = list_entry(list.next, struct ehca_cq, entry);
658
659 list_del(&cq->entry);
660 __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
661 smp_processor_id()));
662 }
663
664 spin_unlock_irqrestore(&cct->task_lock, flags_cct);
665
666}
667
668static int comp_pool_callback(struct notifier_block *nfb,
669 unsigned long action,
670 void *hcpu)
671{
672 unsigned int cpu = (unsigned long)hcpu;
673 struct ehca_cpu_comp_task *cct;
674
675 switch (action) {
676 case CPU_UP_PREPARE:
677 ehca_gen_dbg("CPU: %x (CPU_UP_PREPARE)", cpu);
678 if (!create_comp_task(pool, cpu)) {
679 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
680 return NOTIFY_BAD;
681 }
682 break;
683 case CPU_UP_CANCELED:
684 ehca_gen_dbg("CPU: %x (CPU_UP_CANCELED)", cpu);
685 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
686 kthread_bind(cct->task, any_online_cpu(cpu_online_map));
687 destroy_comp_task(pool, cpu);
688 break;
689 case CPU_ONLINE:
690 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
691 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
692 kthread_bind(cct->task, cpu);
693 wake_up_process(cct->task);
694 break;
695 case CPU_DOWN_PREPARE:
696 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
697 break;
698 case CPU_DOWN_FAILED:
699 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
700 break;
701 case CPU_DEAD:
702 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
703 destroy_comp_task(pool, cpu);
704 take_over_work(pool, cpu);
705 break;
706 }
707
708 return NOTIFY_OK;
709}
710
711#endif
712
713int ehca_create_comp_pool(void)
714{
715#ifdef CONFIG_INFINIBAND_EHCA_SCALING
716 int cpu;
717 struct task_struct *task;
718
719 pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
720 if (pool == NULL)
721 return -ENOMEM;
722
723 spin_lock_init(&pool->last_cpu_lock);
724 pool->last_cpu = any_online_cpu(cpu_online_map);
725
726 pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
727 if (pool->cpu_comp_tasks == NULL) {
728 kfree(pool);
729 return -EINVAL;
730 }
731
732 for_each_online_cpu(cpu) {
733 task = create_comp_task(pool, cpu);
734 if (task) {
735 kthread_bind(task, cpu);
736 wake_up_process(task);
737 }
738 }
739
740 comp_pool_callback_nb.notifier_call = comp_pool_callback;
741 comp_pool_callback_nb.priority = 0;
742 register_cpu_notifier(&comp_pool_callback_nb);
743#endif
744
745 return 0;
746}
747
748void ehca_destroy_comp_pool(void)
749{
750#ifdef CONFIG_INFINIBAND_EHCA_SCALING
751 int i;
752
753 unregister_cpu_notifier(&comp_pool_callback_nb);
754
755 for (i = 0; i < NR_CPUS; i++) {
756 if (cpu_online(i))
757 destroy_comp_task(pool, i);
758 }
759#endif
760
761 return;
762}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h
new file mode 100644
index 000000000000..85bf1fe16fe4
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -0,0 +1,77 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Function definitions and structs for EQs, NEQs and interrupts
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Khadija Souissi <souissi@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_IRQ_H
43#define __EHCA_IRQ_H
44
45
46struct ehca_shca;
47
48#include <linux/interrupt.h>
49#include <linux/types.h>
50#include <asm/atomic.h>
51
52int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
53
54irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs);
55void ehca_tasklet_neq(unsigned long data);
56
57irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs);
58void ehca_tasklet_eq(unsigned long data);
59
60struct ehca_cpu_comp_task {
61 wait_queue_head_t wait_queue;
62 struct list_head cq_list;
63 struct task_struct *task;
64 spinlock_t task_lock;
65 int cq_jobs;
66};
67
68struct ehca_comp_pool {
69 struct ehca_cpu_comp_task *cpu_comp_tasks;
70 int last_cpu;
71 spinlock_t last_cpu_lock;
72};
73
74int ehca_create_comp_pool(void);
75void ehca_destroy_comp_pool(void);
76
77#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
new file mode 100644
index 000000000000..319c39d47f3a
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -0,0 +1,182 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Function definitions for internal functions
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Dietmar Decker <ddecker@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __EHCA_IVERBS_H__
43#define __EHCA_IVERBS_H__
44
45#include "ehca_classes.h"
46
47int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props);
48
49int ehca_query_port(struct ib_device *ibdev, u8 port,
50 struct ib_port_attr *props);
51
52int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
53
54int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
55 union ib_gid *gid);
56
57int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
58 struct ib_port_modify *props);
59
60struct ib_pd *ehca_alloc_pd(struct ib_device *device,
61 struct ib_ucontext *context,
62 struct ib_udata *udata);
63
64int ehca_dealloc_pd(struct ib_pd *pd);
65
66struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
67
68int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
69
70int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
71
72int ehca_destroy_ah(struct ib_ah *ah);
73
74struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
75
76struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
77 struct ib_phys_buf *phys_buf_array,
78 int num_phys_buf,
79 int mr_access_flags, u64 *iova_start);
80
81struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
82 struct ib_umem *region,
83 int mr_access_flags, struct ib_udata *udata);
84
85int ehca_rereg_phys_mr(struct ib_mr *mr,
86 int mr_rereg_mask,
87 struct ib_pd *pd,
88 struct ib_phys_buf *phys_buf_array,
89 int num_phys_buf, int mr_access_flags, u64 *iova_start);
90
91int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
92
93int ehca_dereg_mr(struct ib_mr *mr);
94
95struct ib_mw *ehca_alloc_mw(struct ib_pd *pd);
96
97int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
98 struct ib_mw_bind *mw_bind);
99
100int ehca_dealloc_mw(struct ib_mw *mw);
101
102struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
103 int mr_access_flags,
104 struct ib_fmr_attr *fmr_attr);
105
106int ehca_map_phys_fmr(struct ib_fmr *fmr,
107 u64 *page_list, int list_len, u64 iova);
108
109int ehca_unmap_fmr(struct list_head *fmr_list);
110
111int ehca_dealloc_fmr(struct ib_fmr *fmr);
112
113enum ehca_eq_type {
114 EHCA_EQ = 0, /* Event Queue */
115 EHCA_NEQ /* Notification Event Queue */
116};
117
118int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
119 enum ehca_eq_type type, const u32 length);
120
121int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
122
123void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
124
125
126struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
127 struct ib_ucontext *context,
128 struct ib_udata *udata);
129
130int ehca_destroy_cq(struct ib_cq *cq);
131
132int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
133
134int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
135
136int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
137
138int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify);
139
140struct ib_qp *ehca_create_qp(struct ib_pd *pd,
141 struct ib_qp_init_attr *init_attr,
142 struct ib_udata *udata);
143
144int ehca_destroy_qp(struct ib_qp *qp);
145
146int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
147 struct ib_udata *udata);
148
149int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
150 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
151
152int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
153 struct ib_send_wr **bad_send_wr);
154
155int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
156 struct ib_recv_wr **bad_recv_wr);
157
158u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
159 struct ib_qp_init_attr *qp_init_attr);
160
161int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
162
163int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
164
165struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
166 struct ib_udata *udata);
167
168int ehca_dealloc_ucontext(struct ib_ucontext *context);
169
170int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
171
172void ehca_poll_eqs(unsigned long data);
173
174int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
175 struct vm_area_struct **vma);
176
177int ehca_mmap_register(u64 physical, void **mapped,
178 struct vm_area_struct **vma);
179
180int ehca_munmap(unsigned long addr, size_t len);
181
182#endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
new file mode 100644
index 000000000000..2380994418a5
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -0,0 +1,818 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * module start/stop, HCA detection
5 *
6 * Authors: Heiko J Schick <schickhj@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Joachim Fenkes <fenkes@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#include "ehca_classes.h"
44#include "ehca_iverbs.h"
45#include "ehca_mrmw.h"
46#include "ehca_tools.h"
47#include "hcp_if.h"
48
49MODULE_LICENSE("Dual BSD/GPL");
50MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
51MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
52MODULE_VERSION("SVNEHCA_0016");
53
54int ehca_open_aqp1 = 0;
55int ehca_debug_level = 0;
56int ehca_hw_level = 0;
57int ehca_nr_ports = 2;
58int ehca_use_hp_mr = 0;
59int ehca_port_act_time = 30;
60int ehca_poll_all_eqs = 1;
61int ehca_static_rate = -1;
62
63module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
64module_param_named(debug_level, ehca_debug_level, int, 0);
65module_param_named(hw_level, ehca_hw_level, int, 0);
66module_param_named(nr_ports, ehca_nr_ports, int, 0);
67module_param_named(use_hp_mr, ehca_use_hp_mr, int, 0);
68module_param_named(port_act_time, ehca_port_act_time, int, 0);
69module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, 0);
70module_param_named(static_rate, ehca_static_rate, int, 0);
71
72MODULE_PARM_DESC(open_aqp1,
73 "AQP1 on startup (0: no (default), 1: yes)");
74MODULE_PARM_DESC(debug_level,
75 "debug level"
76 " (0: no debug traces (default), 1: with debug traces)");
77MODULE_PARM_DESC(hw_level,
78 "hardware level"
79 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
80MODULE_PARM_DESC(nr_ports,
81 "number of connected ports (default: 2)");
82MODULE_PARM_DESC(use_hp_mr,
83 "high performance MRs (0: no (default), 1: yes)");
84MODULE_PARM_DESC(port_act_time,
85 "time to wait for port activation (default: 30 sec)");
86MODULE_PARM_DESC(poll_all_eqs,
87 "polls all event queues periodically"
88 " (0: no, 1: yes (default))");
89MODULE_PARM_DESC(static_rate,
90 "set permanent static rate (default: disabled)");
91
92spinlock_t ehca_qp_idr_lock;
93spinlock_t ehca_cq_idr_lock;
94DEFINE_IDR(ehca_qp_idr);
95DEFINE_IDR(ehca_cq_idr);
96
97static struct list_head shca_list; /* list of all registered ehcas */
98static spinlock_t shca_list_lock;
99
100static struct timer_list poll_eqs_timer;
101
102static int ehca_create_slab_caches(void)
103{
104 int ret;
105
106 ret = ehca_init_pd_cache();
107 if (ret) {
108 ehca_gen_err("Cannot create PD SLAB cache.");
109 return ret;
110 }
111
112 ret = ehca_init_cq_cache();
113 if (ret) {
114 ehca_gen_err("Cannot create CQ SLAB cache.");
115 goto create_slab_caches2;
116 }
117
118 ret = ehca_init_qp_cache();
119 if (ret) {
120 ehca_gen_err("Cannot create QP SLAB cache.");
121 goto create_slab_caches3;
122 }
123
124 ret = ehca_init_av_cache();
125 if (ret) {
126 ehca_gen_err("Cannot create AV SLAB cache.");
127 goto create_slab_caches4;
128 }
129
130 ret = ehca_init_mrmw_cache();
131 if (ret) {
132 ehca_gen_err("Cannot create MR&MW SLAB cache.");
133 goto create_slab_caches5;
134 }
135
136 return 0;
137
138create_slab_caches5:
139 ehca_cleanup_av_cache();
140
141create_slab_caches4:
142 ehca_cleanup_qp_cache();
143
144create_slab_caches3:
145 ehca_cleanup_cq_cache();
146
147create_slab_caches2:
148 ehca_cleanup_pd_cache();
149
150 return ret;
151}
152
153static void ehca_destroy_slab_caches(void)
154{
155 ehca_cleanup_mrmw_cache();
156 ehca_cleanup_av_cache();
157 ehca_cleanup_qp_cache();
158 ehca_cleanup_cq_cache();
159 ehca_cleanup_pd_cache();
160}
161
162#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
163#define EHCA_REVID EHCA_BMASK_IBM(40,63)
164
165int ehca_sense_attributes(struct ehca_shca *shca)
166{
167 int ret = 0;
168 u64 h_ret;
169 struct hipz_query_hca *rblock;
170
171 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
172 if (!rblock) {
173 ehca_gen_err("Cannot allocate rblock memory.");
174 return -ENOMEM;
175 }
176
177 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
178 if (h_ret != H_SUCCESS) {
179 ehca_gen_err("Cannot query device properties. h_ret=%lx",
180 h_ret);
181 ret = -EPERM;
182 goto num_ports1;
183 }
184
185 if (ehca_nr_ports == 1)
186 shca->num_ports = 1;
187 else
188 shca->num_ports = (u8)rblock->num_ports;
189
190 ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
191
192 if (ehca_hw_level == 0) {
193 u32 hcaaver;
194 u32 revid;
195
196 hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
197 revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
198
199 ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
200
201 if ((hcaaver == 1) && (revid == 0))
202 shca->hw_level = 0;
203 else if ((hcaaver == 1) && (revid == 1))
204 shca->hw_level = 1;
205 else if ((hcaaver == 1) && (revid == 2))
206 shca->hw_level = 2;
207 }
208 ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
209
210 shca->sport[0].rate = IB_RATE_30_GBPS;
211 shca->sport[1].rate = IB_RATE_30_GBPS;
212
213num_ports1:
214 kfree(rblock);
215 return ret;
216}
217
218static int init_node_guid(struct ehca_shca *shca)
219{
220 int ret = 0;
221 struct hipz_query_hca *rblock;
222
223 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
224 if (!rblock) {
225 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
226 return -ENOMEM;
227 }
228
229 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
230 ehca_err(&shca->ib_device, "Can't query device properties");
231 ret = -EINVAL;
232 goto init_node_guid1;
233 }
234
235 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
236
237init_node_guid1:
238 kfree(rblock);
239 return ret;
240}
241
242int ehca_register_device(struct ehca_shca *shca)
243{
244 int ret;
245
246 ret = init_node_guid(shca);
247 if (ret)
248 return ret;
249
250 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
251 shca->ib_device.owner = THIS_MODULE;
252
253 shca->ib_device.uverbs_abi_ver = 5;
254 shca->ib_device.uverbs_cmd_mask =
255 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
256 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
257 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
258 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
259 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
260 (1ull << IB_USER_VERBS_CMD_REG_MR) |
261 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
262 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
263 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
264 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
265 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
266 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
267 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
268 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
269 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
270 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
271
272 shca->ib_device.node_type = RDMA_NODE_IB_CA;
273 shca->ib_device.phys_port_cnt = shca->num_ports;
274 shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
275 shca->ib_device.query_device = ehca_query_device;
276 shca->ib_device.query_port = ehca_query_port;
277 shca->ib_device.query_gid = ehca_query_gid;
278 shca->ib_device.query_pkey = ehca_query_pkey;
279 /* shca->ib_device.modify_device = ehca_modify_device */
280 shca->ib_device.modify_port = ehca_modify_port;
281 shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
282 shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
283 shca->ib_device.alloc_pd = ehca_alloc_pd;
284 shca->ib_device.dealloc_pd = ehca_dealloc_pd;
285 shca->ib_device.create_ah = ehca_create_ah;
286 /* shca->ib_device.modify_ah = ehca_modify_ah; */
287 shca->ib_device.query_ah = ehca_query_ah;
288 shca->ib_device.destroy_ah = ehca_destroy_ah;
289 shca->ib_device.create_qp = ehca_create_qp;
290 shca->ib_device.modify_qp = ehca_modify_qp;
291 shca->ib_device.query_qp = ehca_query_qp;
292 shca->ib_device.destroy_qp = ehca_destroy_qp;
293 shca->ib_device.post_send = ehca_post_send;
294 shca->ib_device.post_recv = ehca_post_recv;
295 shca->ib_device.create_cq = ehca_create_cq;
296 shca->ib_device.destroy_cq = ehca_destroy_cq;
297 shca->ib_device.resize_cq = ehca_resize_cq;
298 shca->ib_device.poll_cq = ehca_poll_cq;
299 /* shca->ib_device.peek_cq = ehca_peek_cq; */
300 shca->ib_device.req_notify_cq = ehca_req_notify_cq;
301 /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
302 shca->ib_device.get_dma_mr = ehca_get_dma_mr;
303 shca->ib_device.reg_phys_mr = ehca_reg_phys_mr;
304 shca->ib_device.reg_user_mr = ehca_reg_user_mr;
305 shca->ib_device.query_mr = ehca_query_mr;
306 shca->ib_device.dereg_mr = ehca_dereg_mr;
307 shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr;
308 shca->ib_device.alloc_mw = ehca_alloc_mw;
309 shca->ib_device.bind_mw = ehca_bind_mw;
310 shca->ib_device.dealloc_mw = ehca_dealloc_mw;
311 shca->ib_device.alloc_fmr = ehca_alloc_fmr;
312 shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
313 shca->ib_device.unmap_fmr = ehca_unmap_fmr;
314 shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
315 shca->ib_device.attach_mcast = ehca_attach_mcast;
316 shca->ib_device.detach_mcast = ehca_detach_mcast;
317 /* shca->ib_device.process_mad = ehca_process_mad; */
318 shca->ib_device.mmap = ehca_mmap;
319
320 ret = ib_register_device(&shca->ib_device);
321 if (ret)
322 ehca_err(&shca->ib_device,
323 "ib_register_device() failed ret=%x", ret);
324
325 return ret;
326}
327
328static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
329{
330 struct ehca_sport *sport = &shca->sport[port - 1];
331 struct ib_cq *ibcq;
332 struct ib_qp *ibqp;
333 struct ib_qp_init_attr qp_init_attr;
334 int ret;
335
336 if (sport->ibcq_aqp1) {
337 ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
338 return -EPERM;
339 }
340
341 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
342 if (IS_ERR(ibcq)) {
343 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
344 return PTR_ERR(ibcq);
345 }
346 sport->ibcq_aqp1 = ibcq;
347
348 if (sport->ibqp_aqp1) {
349 ehca_err(&shca->ib_device, "AQP1 QP is already created.");
350 ret = -EPERM;
351 goto create_aqp1;
352 }
353
354 memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
355 qp_init_attr.send_cq = ibcq;
356 qp_init_attr.recv_cq = ibcq;
357 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
358 qp_init_attr.cap.max_send_wr = 100;
359 qp_init_attr.cap.max_recv_wr = 100;
360 qp_init_attr.cap.max_send_sge = 2;
361 qp_init_attr.cap.max_recv_sge = 1;
362 qp_init_attr.qp_type = IB_QPT_GSI;
363 qp_init_attr.port_num = port;
364 qp_init_attr.qp_context = NULL;
365 qp_init_attr.event_handler = NULL;
366 qp_init_attr.srq = NULL;
367
368 ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
369 if (IS_ERR(ibqp)) {
370 ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
371 ret = PTR_ERR(ibqp);
372 goto create_aqp1;
373 }
374 sport->ibqp_aqp1 = ibqp;
375
376 return 0;
377
378create_aqp1:
379 ib_destroy_cq(sport->ibcq_aqp1);
380 return ret;
381}
382
383static int ehca_destroy_aqp1(struct ehca_sport *sport)
384{
385 int ret;
386
387 ret = ib_destroy_qp(sport->ibqp_aqp1);
388 if (ret) {
389 ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret);
390 return ret;
391 }
392
393 ret = ib_destroy_cq(sport->ibcq_aqp1);
394 if (ret)
395 ehca_gen_err("Cannot destroy AQP1 CQ. ret=%x", ret);
396
397 return ret;
398}
399
400static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
401{
402 return snprintf(buf, PAGE_SIZE, "%d\n",
403 ehca_debug_level);
404}
405
406static ssize_t ehca_store_debug_level(struct device_driver *ddp,
407 const char *buf, size_t count)
408{
409 int value = (*buf) - '0';
410 if (value >= 0 && value <= 9)
411 ehca_debug_level = value;
412 return 1;
413}
414
415DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
416 ehca_show_debug_level, ehca_store_debug_level);
417
418void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
419{
420 driver_create_file(&drv->driver, &driver_attr_debug_level);
421}
422
423void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
424{
425 driver_remove_file(&drv->driver, &driver_attr_debug_level);
426}
427
428#define EHCA_RESOURCE_ATTR(name) \
429static ssize_t ehca_show_##name(struct device *dev, \
430 struct device_attribute *attr, \
431 char *buf) \
432{ \
433 struct ehca_shca *shca; \
434 struct hipz_query_hca *rblock; \
435 int data; \
436 \
437 shca = dev->driver_data; \
438 \
439 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \
440 if (!rblock) { \
441 dev_err(dev, "Can't allocate rblock memory."); \
442 return 0; \
443 } \
444 \
445 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
446 dev_err(dev, "Can't query device properties"); \
447 kfree(rblock); \
448 return 0; \
449 } \
450 \
451 data = rblock->name; \
452 kfree(rblock); \
453 \
454 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
455 return snprintf(buf, 256, "1\n"); \
456 else \
457 return snprintf(buf, 256, "%d\n", data); \
458 \
459} \
460static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
461
462EHCA_RESOURCE_ATTR(num_ports);
463EHCA_RESOURCE_ATTR(hw_ver);
464EHCA_RESOURCE_ATTR(max_eq);
465EHCA_RESOURCE_ATTR(cur_eq);
466EHCA_RESOURCE_ATTR(max_cq);
467EHCA_RESOURCE_ATTR(cur_cq);
468EHCA_RESOURCE_ATTR(max_qp);
469EHCA_RESOURCE_ATTR(cur_qp);
470EHCA_RESOURCE_ATTR(max_mr);
471EHCA_RESOURCE_ATTR(cur_mr);
472EHCA_RESOURCE_ATTR(max_mw);
473EHCA_RESOURCE_ATTR(cur_mw);
474EHCA_RESOURCE_ATTR(max_pd);
475EHCA_RESOURCE_ATTR(max_ah);
476
477static ssize_t ehca_show_adapter_handle(struct device *dev,
478 struct device_attribute *attr,
479 char *buf)
480{
481 struct ehca_shca *shca = dev->driver_data;
482
483 return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
484
485}
486static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
487
488
489void ehca_create_device_sysfs(struct ibmebus_dev *dev)
490{
491 device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
492 device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
493 device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
494 device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
495 device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
496 device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
497 device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
498 device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
499 device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
500 device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
501 device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
502 device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
503 device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
504 device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
505 device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
506}
507
508void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
509{
510 device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
511 device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
512 device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
513 device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
514 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
515 device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
516 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
517 device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
518 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
519 device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
520 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
521 device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
522 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
523 device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
524 device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
525}
526
527static int __devinit ehca_probe(struct ibmebus_dev *dev,
528 const struct of_device_id *id)
529{
530 struct ehca_shca *shca;
531 u64 *handle;
532 struct ib_pd *ibpd;
533 int ret;
534
535 handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle", NULL);
536 if (!handle) {
537 ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
538 dev->ofdev.node->full_name);
539 return -ENODEV;
540 }
541
542 if (!(*handle)) {
543 ehca_gen_err("Wrong eHCA handle for adapter: %s.",
544 dev->ofdev.node->full_name);
545 return -ENODEV;
546 }
547
548 shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
549 if (!shca) {
550 ehca_gen_err("Cannot allocate shca memory.");
551 return -ENOMEM;
552 }
553
554 shca->ibmebus_dev = dev;
555 shca->ipz_hca_handle.handle = *handle;
556 dev->ofdev.dev.driver_data = shca;
557
558 ret = ehca_sense_attributes(shca);
559 if (ret < 0) {
560 ehca_gen_err("Cannot sense eHCA attributes.");
561 goto probe1;
562 }
563
564 ret = ehca_register_device(shca);
565 if (ret) {
566 ehca_gen_err("Cannot register Infiniband device");
567 goto probe1;
568 }
569
570 /* create event queues */
571 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
572 if (ret) {
573 ehca_err(&shca->ib_device, "Cannot create EQ.");
574 goto probe2;
575 }
576
577 ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
578 if (ret) {
579 ehca_err(&shca->ib_device, "Cannot create NEQ.");
580 goto probe3;
581 }
582
583 /* create internal protection domain */
584 ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL);
585 if (IS_ERR(ibpd)) {
586 ehca_err(&shca->ib_device, "Cannot create internal PD.");
587 ret = PTR_ERR(ibpd);
588 goto probe4;
589 }
590
591 shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
592 shca->pd->ib_pd.device = &shca->ib_device;
593
594 /* create internal max MR */
595 ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
596
597 if (ret) {
598 ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x",
599 ret);
600 goto probe5;
601 }
602
603 /* create AQP1 for port 1 */
604 if (ehca_open_aqp1 == 1) {
605 shca->sport[0].port_state = IB_PORT_DOWN;
606 ret = ehca_create_aqp1(shca, 1);
607 if (ret) {
608 ehca_err(&shca->ib_device,
609 "Cannot create AQP1 for port 1.");
610 goto probe6;
611 }
612 }
613
614 /* create AQP1 for port 2 */
615 if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
616 shca->sport[1].port_state = IB_PORT_DOWN;
617 ret = ehca_create_aqp1(shca, 2);
618 if (ret) {
619 ehca_err(&shca->ib_device,
620 "Cannot create AQP1 for port 2.");
621 goto probe7;
622 }
623 }
624
625 ehca_create_device_sysfs(dev);
626
627 spin_lock(&shca_list_lock);
628 list_add(&shca->shca_list, &shca_list);
629 spin_unlock(&shca_list_lock);
630
631 return 0;
632
633probe7:
634 ret = ehca_destroy_aqp1(&shca->sport[0]);
635 if (ret)
636 ehca_err(&shca->ib_device,
637 "Cannot destroy AQP1 for port 1. ret=%x", ret);
638
639probe6:
640 ret = ehca_dereg_internal_maxmr(shca);
641 if (ret)
642 ehca_err(&shca->ib_device,
643 "Cannot destroy internal MR. ret=%x", ret);
644
645probe5:
646 ret = ehca_dealloc_pd(&shca->pd->ib_pd);
647 if (ret)
648 ehca_err(&shca->ib_device,
649 "Cannot destroy internal PD. ret=%x", ret);
650
651probe4:
652 ret = ehca_destroy_eq(shca, &shca->neq);
653 if (ret)
654 ehca_err(&shca->ib_device,
655 "Cannot destroy NEQ. ret=%x", ret);
656
657probe3:
658 ret = ehca_destroy_eq(shca, &shca->eq);
659 if (ret)
660 ehca_err(&shca->ib_device,
661 "Cannot destroy EQ. ret=%x", ret);
662
663probe2:
664 ib_unregister_device(&shca->ib_device);
665
666probe1:
667 ib_dealloc_device(&shca->ib_device);
668
669 return -EINVAL;
670}
671
672static int __devexit ehca_remove(struct ibmebus_dev *dev)
673{
674 struct ehca_shca *shca = dev->ofdev.dev.driver_data;
675 int ret;
676
677 ehca_remove_device_sysfs(dev);
678
679 if (ehca_open_aqp1 == 1) {
680 int i;
681 for (i = 0; i < shca->num_ports; i++) {
682 ret = ehca_destroy_aqp1(&shca->sport[i]);
683 if (ret)
684 ehca_err(&shca->ib_device,
685 "Cannot destroy AQP1 for port %x "
686 "ret=%x", i, ret);
687 }
688 }
689
690 ib_unregister_device(&shca->ib_device);
691
692 ret = ehca_dereg_internal_maxmr(shca);
693 if (ret)
694 ehca_err(&shca->ib_device,
695 "Cannot destroy internal MR. ret=%x", ret);
696
697 ret = ehca_dealloc_pd(&shca->pd->ib_pd);
698 if (ret)
699 ehca_err(&shca->ib_device,
700 "Cannot destroy internal PD. ret=%x", ret);
701
702 ret = ehca_destroy_eq(shca, &shca->eq);
703 if (ret)
704 ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret);
705
706 ret = ehca_destroy_eq(shca, &shca->neq);
707 if (ret)
708 ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%x", ret);
709
710 ib_dealloc_device(&shca->ib_device);
711
712 spin_lock(&shca_list_lock);
713 list_del(&shca->shca_list);
714 spin_unlock(&shca_list_lock);
715
716 return ret;
717}
718
719static struct of_device_id ehca_device_table[] =
720{
721 {
722 .name = "lhca",
723 .compatible = "IBM,lhca",
724 },
725 {},
726};
727
728static struct ibmebus_driver ehca_driver = {
729 .name = "ehca",
730 .id_table = ehca_device_table,
731 .probe = ehca_probe,
732 .remove = ehca_remove,
733};
734
735void ehca_poll_eqs(unsigned long data)
736{
737 struct ehca_shca *shca;
738
739 spin_lock(&shca_list_lock);
740 list_for_each_entry(shca, &shca_list, shca_list) {
741 if (shca->eq.is_initialized)
742 ehca_tasklet_eq((unsigned long)(void*)shca);
743 }
744 mod_timer(&poll_eqs_timer, jiffies + HZ);
745 spin_unlock(&shca_list_lock);
746}
747
748int __init ehca_module_init(void)
749{
750 int ret;
751
752 printk(KERN_INFO "eHCA Infiniband Device Driver "
753 "(Rel.: SVNEHCA_0016)\n");
754 idr_init(&ehca_qp_idr);
755 idr_init(&ehca_cq_idr);
756 spin_lock_init(&ehca_qp_idr_lock);
757 spin_lock_init(&ehca_cq_idr_lock);
758
759 INIT_LIST_HEAD(&shca_list);
760 spin_lock_init(&shca_list_lock);
761
762 if ((ret = ehca_create_comp_pool())) {
763 ehca_gen_err("Cannot create comp pool.");
764 return ret;
765 }
766
767 if ((ret = ehca_create_slab_caches())) {
768 ehca_gen_err("Cannot create SLAB caches");
769 ret = -ENOMEM;
770 goto module_init1;
771 }
772
773 if ((ret = ibmebus_register_driver(&ehca_driver))) {
774 ehca_gen_err("Cannot register eHCA device driver");
775 ret = -EINVAL;
776 goto module_init2;
777 }
778
779 ehca_create_driver_sysfs(&ehca_driver);
780
781 if (ehca_poll_all_eqs != 1) {
782 ehca_gen_err("WARNING!!!");
783 ehca_gen_err("It is possible to lose interrupts.");
784 } else {
785 init_timer(&poll_eqs_timer);
786 poll_eqs_timer.function = ehca_poll_eqs;
787 poll_eqs_timer.expires = jiffies + HZ;
788 add_timer(&poll_eqs_timer);
789 }
790
791 return 0;
792
793module_init2:
794 ehca_destroy_slab_caches();
795
796module_init1:
797 ehca_destroy_comp_pool();
798 return ret;
799}
800
801void __exit ehca_module_exit(void)
802{
803 if (ehca_poll_all_eqs == 1)
804 del_timer_sync(&poll_eqs_timer);
805
806 ehca_remove_driver_sysfs(&ehca_driver);
807 ibmebus_unregister_driver(&ehca_driver);
808
809 ehca_destroy_slab_caches();
810
811 ehca_destroy_comp_pool();
812
813 idr_destroy(&ehca_cq_idr);
814 idr_destroy(&ehca_qp_idr);
815}
816
817module_init(ehca_module_init);
818module_exit(ehca_module_exit);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
new file mode 100644
index 000000000000..32a870660bfe
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -0,0 +1,131 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * mcast functions
5 *
6 * Authors: Khadija Souissi <souissik@de.ibm.com>
7 * Waleri Fomin <fomin@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
10 * Heiko J Schick <schickhj@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * All rights reserved.
15 *
16 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
17 * BSD.
18 *
19 * OpenIB BSD License
20 *
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions are met:
23 *
24 * Redistributions of source code must retain the above copyright notice, this
25 * list of conditions and the following disclaimer.
26 *
27 * Redistributions in binary form must reproduce the above copyright notice,
28 * this list of conditions and the following disclaimer in the documentation
29 * and/or other materials
30 * provided with the distribution.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
33 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
36 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
37 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
38 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
40 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGE.
43 */
44
45#include <linux/module.h>
46#include <linux/err.h>
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "ehca_qes.h"
50#include "ehca_iverbs.h"
51#include "hcp_if.h"
52
53#define MAX_MC_LID 0xFFFE
54#define MIN_MC_LID 0xC000 /* Multicast limits */
55#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
56#define EHCA_VALID_MULTICAST_LID(lid) \
57 (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
58
59int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
60{
61 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
62 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
63 ib_device);
64 union ib_gid my_gid;
65 u64 subnet_prefix, interface_id, h_ret;
66
67 if (ibqp->qp_type != IB_QPT_UD) {
68 ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
69 return -EINVAL;
70 }
71
72 if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
73		ehca_err(ibqp->device, "invalid multicast gid");
74 return -EINVAL;
75 } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
76		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
77 return -EINVAL;
78 }
79
80 memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
81
82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
83 interface_id = be64_to_cpu(my_gid.global.interface_id);
84 h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
85 my_qp->ipz_qp_handle,
86 my_qp->galpas.kernel,
87 lid, subnet_prefix, interface_id);
88 if (h_ret != H_SUCCESS)
89 ehca_err(ibqp->device,
90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
91 "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
92
93 return ehca2ib_return_code(h_ret);
94}
95
96int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
97{
98 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
99 struct ehca_shca *shca = container_of(ibqp->pd->device,
100 struct ehca_shca, ib_device);
101 union ib_gid my_gid;
102 u64 subnet_prefix, interface_id, h_ret;
103
104 if (ibqp->qp_type != IB_QPT_UD) {
105 ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
106 return -EINVAL;
107 }
108
109 if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
110		ehca_err(ibqp->device, "invalid multicast gid");
111 return -EINVAL;
112 } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
113		ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
114 return -EINVAL;
115 }
116
117 memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
118
119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
120 interface_id = be64_to_cpu(my_gid.global.interface_id);
121 h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
122 my_qp->ipz_qp_handle,
123 my_qp->galpas.kernel,
124 lid, subnet_prefix, interface_id);
125 if (h_ret != H_SUCCESS)
126 ehca_err(ibqp->device,
127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
128 "h_ret=%lx", my_qp, ibqp->qp_num, h_ret);
129
130 return ehca2ib_return_code(h_ret);
131}
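A minimal usage sketch (illustrative, not part of this patch): consumers do not
call the ehca handlers above directly; they use the core verbs entry points
ib_attach_mcast()/ib_detach_mcast(), which the IB midlayer dispatches to the
device driver. The UD QP and multicast GID below are assumed to already exist.

	#include <rdma/ib_verbs.h>

	/* Join, then leave, a multicast group on a UD QP (illustrative only).
	 * 0xC000 is the lowest LID accepted by the checks above. */
	static int example_mcast_roundtrip(struct ib_qp *qp, union ib_gid *mgid)
	{
		int ret = ib_attach_mcast(qp, mgid, 0xC000);

		if (ret)
			return ret;
		return ib_detach_mcast(qp, mgid, 0xC000);
	}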
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
new file mode 100644
index 000000000000..5ca65441e1da
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -0,0 +1,2261 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <asm/current.h>
43
44#include "ehca_iverbs.h"
45#include "ehca_mrmw.h"
46#include "hcp_if.h"
47#include "hipz_hw.h"
48
49static struct kmem_cache *mr_cache;
50static struct kmem_cache *mw_cache;
51
52static struct ehca_mr *ehca_mr_new(void)
53{
54 struct ehca_mr *me;
55
56 me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
57 if (me) {
58 memset(me, 0, sizeof(struct ehca_mr));
59 spin_lock_init(&me->mrlock);
60 } else
61 ehca_gen_err("alloc failed");
62
63 return me;
64}
65
66static void ehca_mr_delete(struct ehca_mr *me)
67{
68 kmem_cache_free(mr_cache, me);
69}
70
71static struct ehca_mw *ehca_mw_new(void)
72{
73 struct ehca_mw *me;
74
75 me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
76 if (me) {
77 memset(me, 0, sizeof(struct ehca_mw));
78 spin_lock_init(&me->mwlock);
79 } else
80 ehca_gen_err("alloc failed");
81
82 return me;
83}
84
85static void ehca_mw_delete(struct ehca_mw *me)
86{
87 kmem_cache_free(mw_cache, me);
88}
89
90/*----------------------------------------------------------------------*/
91
92struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
93{
94 struct ib_mr *ib_mr;
95 int ret;
96 struct ehca_mr *e_maxmr;
97 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
98 struct ehca_shca *shca =
99 container_of(pd->device, struct ehca_shca, ib_device);
100
101 if (shca->maxmr) {
102 e_maxmr = ehca_mr_new();
103 if (!e_maxmr) {
104 ehca_err(&shca->ib_device, "out of memory");
105 ib_mr = ERR_PTR(-ENOMEM);
106 goto get_dma_mr_exit0;
107 }
108
109 ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
110 mr_access_flags, e_pd,
111 &e_maxmr->ib.ib_mr.lkey,
112 &e_maxmr->ib.ib_mr.rkey);
113 if (ret) {
114 ib_mr = ERR_PTR(ret);
115 goto get_dma_mr_exit0;
116 }
117 ib_mr = &e_maxmr->ib.ib_mr;
118 } else {
119		ehca_err(&shca->ib_device, "no internal max-MR exists!");
120 ib_mr = ERR_PTR(-EINVAL);
121 goto get_dma_mr_exit0;
122 }
123
124get_dma_mr_exit0:
125 if (IS_ERR(ib_mr))
126 ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
127 PTR_ERR(ib_mr), pd, mr_access_flags);
128 return ib_mr;
129} /* end ehca_get_dma_mr() */
130
131/*----------------------------------------------------------------------*/
132
133struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
134 struct ib_phys_buf *phys_buf_array,
135 int num_phys_buf,
136 int mr_access_flags,
137 u64 *iova_start)
138{
139 struct ib_mr *ib_mr;
140 int ret;
141 struct ehca_mr *e_mr;
142 struct ehca_shca *shca =
143 container_of(pd->device, struct ehca_shca, ib_device);
144 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
145
146 u64 size;
147 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
148 u32 num_pages_mr;
149 u32 num_pages_4k; /* 4k portion "pages" */
150
151 if ((num_phys_buf <= 0) || !phys_buf_array) {
152 ehca_err(pd->device, "bad input values: num_phys_buf=%x "
153 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
154 ib_mr = ERR_PTR(-EINVAL);
155 goto reg_phys_mr_exit0;
156 }
157 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
158 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
159 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
160 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
161 /*
162 * Remote Write Access requires Local Write Access
163 * Remote Atomic Access requires Local Write Access
164 */
165 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
166 mr_access_flags);
167 ib_mr = ERR_PTR(-EINVAL);
168 goto reg_phys_mr_exit0;
169 }
170
171 /* check physical buffer list and calculate size */
172 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
173 iova_start, &size);
174 if (ret) {
175 ib_mr = ERR_PTR(ret);
176 goto reg_phys_mr_exit0;
177 }
178 if ((size == 0) ||
179 (((u64)iova_start + size) < (u64)iova_start)) {
180 ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
181 size, iova_start);
182 ib_mr = ERR_PTR(-EINVAL);
183 goto reg_phys_mr_exit0;
184 }
185
186 e_mr = ehca_mr_new();
187 if (!e_mr) {
188 ehca_err(pd->device, "out of memory");
189 ib_mr = ERR_PTR(-ENOMEM);
190 goto reg_phys_mr_exit0;
191 }
192
193 /* determine number of MR pages */
194 num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
195 PAGE_SIZE - 1) / PAGE_SIZE);
196 num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
197 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
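	/*
	 * Worked example (illustrative, not part of the original patch): for
	 * iova_start = 0x1003 and size = 0x3000 with the driver's 4 KB
	 * EHCA_PAGESIZE, num_pages_4k = (0x003 + 0x3000 + 0xFFF) / 0x1000 = 4;
	 * with 64 KB kernel pages, num_pages_mr = (0x1003 + 0x3000 + 0xFFFF) /
	 * 0x10000 = 1.
	 */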
198
199 /* register MR on HCA */
200 if (ehca_mr_is_maxmr(size, iova_start)) {
201 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
202 ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
203 e_pd, &e_mr->ib.ib_mr.lkey,
204 &e_mr->ib.ib_mr.rkey);
205 if (ret) {
206 ib_mr = ERR_PTR(ret);
207 goto reg_phys_mr_exit1;
208 }
209 } else {
210 pginfo.type = EHCA_MR_PGI_PHYS;
211 pginfo.num_pages = num_pages_mr;
212 pginfo.num_4k = num_pages_4k;
213 pginfo.num_phys_buf = num_phys_buf;
214 pginfo.phys_buf_array = phys_buf_array;
215 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
216 EHCA_PAGESIZE);
217
218 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
219 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
220 &e_mr->ib.ib_mr.rkey);
221 if (ret) {
222 ib_mr = ERR_PTR(ret);
223 goto reg_phys_mr_exit1;
224 }
225 }
226
227 /* successful registration of all pages */
228 return &e_mr->ib.ib_mr;
229
230reg_phys_mr_exit1:
231 ehca_mr_delete(e_mr);
232reg_phys_mr_exit0:
233 if (IS_ERR(ib_mr))
234 ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
235 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
236 PTR_ERR(ib_mr), pd, phys_buf_array,
237 num_phys_buf, mr_access_flags, iova_start);
238 return ib_mr;
239} /* end ehca_reg_phys_mr() */
240
241/*----------------------------------------------------------------------*/
242
243struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
244 struct ib_umem *region,
245 int mr_access_flags,
246 struct ib_udata *udata)
247{
248 struct ib_mr *ib_mr;
249 struct ehca_mr *e_mr;
250 struct ehca_shca *shca =
251 container_of(pd->device, struct ehca_shca, ib_device);
252 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
253 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
254 int ret;
255 u32 num_pages_mr;
256 u32 num_pages_4k; /* 4k portion "pages" */
257
258 if (!pd) {
259 ehca_gen_err("bad pd=%p", pd);
260 return ERR_PTR(-EFAULT);
261 }
262 if (!region) {
263 ehca_err(pd->device, "bad input values: region=%p", region);
264 ib_mr = ERR_PTR(-EINVAL);
265 goto reg_user_mr_exit0;
266 }
267 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
268 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
269 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
270 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
271 /*
272 * Remote Write Access requires Local Write Access
273 * Remote Atomic Access requires Local Write Access
274 */
275 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
276 mr_access_flags);
277 ib_mr = ERR_PTR(-EINVAL);
278 goto reg_user_mr_exit0;
279 }
280 if (region->page_size != PAGE_SIZE) {
281 ehca_err(pd->device, "page size not supported, "
282 "region->page_size=%x", region->page_size);
283 ib_mr = ERR_PTR(-EINVAL);
284 goto reg_user_mr_exit0;
285 }
286
287 if ((region->length == 0) ||
288 ((region->virt_base + region->length) < region->virt_base)) {
289 ehca_err(pd->device, "bad input values: length=%lx "
290 "virt_base=%lx", region->length, region->virt_base);
291 ib_mr = ERR_PTR(-EINVAL);
292 goto reg_user_mr_exit0;
293 }
294
295 e_mr = ehca_mr_new();
296 if (!e_mr) {
297 ehca_err(pd->device, "out of memory");
298 ib_mr = ERR_PTR(-ENOMEM);
299 goto reg_user_mr_exit0;
300 }
301
302 /* determine number of MR pages */
303 num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length +
304 PAGE_SIZE - 1) / PAGE_SIZE);
305 num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length +
306 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
307
308 /* register MR on HCA */
309 pginfo.type = EHCA_MR_PGI_USER;
310 pginfo.num_pages = num_pages_mr;
311 pginfo.num_4k = num_pages_4k;
312 pginfo.region = region;
313 pginfo.next_4k = region->offset / EHCA_PAGESIZE;
314 pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
315 (&region->chunk_list),
316 list);
317
318 ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base,
319 region->length, mr_access_flags, e_pd, &pginfo,
320 &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
321 if (ret) {
322 ib_mr = ERR_PTR(ret);
323 goto reg_user_mr_exit1;
324 }
325
326 /* successful registration of all pages */
327 return &e_mr->ib.ib_mr;
328
329reg_user_mr_exit1:
330 ehca_mr_delete(e_mr);
331reg_user_mr_exit0:
332 if (IS_ERR(ib_mr))
333 ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x"
334 " udata=%p",
335 PTR_ERR(ib_mr), pd, region, mr_access_flags, udata);
336 return ib_mr;
337} /* end ehca_reg_user_mr() */
338
339/*----------------------------------------------------------------------*/
340
341int ehca_rereg_phys_mr(struct ib_mr *mr,
342 int mr_rereg_mask,
343 struct ib_pd *pd,
344 struct ib_phys_buf *phys_buf_array,
345 int num_phys_buf,
346 int mr_access_flags,
347 u64 *iova_start)
348{
349 int ret;
350
351 struct ehca_shca *shca =
352 container_of(mr->device, struct ehca_shca, ib_device);
353 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
354 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
355 u64 new_size;
356 u64 *new_start;
357 u32 new_acl;
358 struct ehca_pd *new_pd;
359 u32 tmp_lkey, tmp_rkey;
360 unsigned long sl_flags;
361 u32 num_pages_mr = 0;
362 u32 num_pages_4k = 0; /* 4k portion "pages" */
363 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
364 u32 cur_pid = current->tgid;
365
366 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
367 (my_pd->ownpid != cur_pid)) {
368 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
369 cur_pid, my_pd->ownpid);
370 ret = -EINVAL;
371 goto rereg_phys_mr_exit0;
372 }
373
374 if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
375 /* TODO not supported, because PHYP rereg hCall needs pages */
376 ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
377 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
378 ret = -EINVAL;
379 goto rereg_phys_mr_exit0;
380 }
381
382 if (mr_rereg_mask & IB_MR_REREG_PD) {
383 if (!pd) {
384 ehca_err(mr->device, "rereg with bad pd, pd=%p "
385 "mr_rereg_mask=%x", pd, mr_rereg_mask);
386 ret = -EINVAL;
387 goto rereg_phys_mr_exit0;
388 }
389 }
390
391 if ((mr_rereg_mask &
392 ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
393 (mr_rereg_mask == 0)) {
394 ret = -EINVAL;
395 goto rereg_phys_mr_exit0;
396 }
397
398 /* check other parameters */
399 if (e_mr == shca->maxmr) {
400 /* should be impossible, however reject to be sure */
401 ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
402 "shca->maxmr=%p mr->lkey=%x",
403 mr, shca->maxmr, mr->lkey);
404 ret = -EINVAL;
405 goto rereg_phys_mr_exit0;
406 }
407 if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
408 if (e_mr->flags & EHCA_MR_FLAG_FMR) {
409 ehca_err(mr->device, "not supported for FMR, mr=%p "
410 "flags=%x", mr, e_mr->flags);
411 ret = -EINVAL;
412 goto rereg_phys_mr_exit0;
413 }
414 if (!phys_buf_array || num_phys_buf <= 0) {
415 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
416 " phys_buf_array=%p num_phys_buf=%x",
417 mr_rereg_mask, phys_buf_array, num_phys_buf);
418 ret = -EINVAL;
419 goto rereg_phys_mr_exit0;
420 }
421 }
422 if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
423 (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
424 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
425 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
426 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
427 /*
428 * Remote Write Access requires Local Write Access
429 * Remote Atomic Access requires Local Write Access
430 */
431 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
432 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
433 ret = -EINVAL;
434 goto rereg_phys_mr_exit0;
435 }
436
437 /* set requested values dependent on rereg request */
438 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
439 new_start = e_mr->start; /* new == old address */
440 new_size = e_mr->size; /* new == old length */
441 new_acl = e_mr->acl; /* new == old access control */
442 new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/
443
444 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
445 new_start = iova_start; /* change address */
446 /* check physical buffer list and calculate size */
447 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
448 num_phys_buf, iova_start,
449 &new_size);
450 if (ret)
451 goto rereg_phys_mr_exit1;
452 if ((new_size == 0) ||
453 (((u64)iova_start + new_size) < (u64)iova_start)) {
454 ehca_err(mr->device, "bad input values: new_size=%lx "
455 "iova_start=%p", new_size, iova_start);
456 ret = -EINVAL;
457 goto rereg_phys_mr_exit1;
458 }
459 num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
460 PAGE_SIZE - 1) / PAGE_SIZE);
461 num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
462 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
463 pginfo.type = EHCA_MR_PGI_PHYS;
464 pginfo.num_pages = num_pages_mr;
465 pginfo.num_4k = num_pages_4k;
466 pginfo.num_phys_buf = num_phys_buf;
467 pginfo.phys_buf_array = phys_buf_array;
468 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
469 EHCA_PAGESIZE);
470 }
471 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
472 new_acl = mr_access_flags;
473 if (mr_rereg_mask & IB_MR_REREG_PD)
474 new_pd = container_of(pd, struct ehca_pd, ib_pd);
475
476 ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
477 new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
478 if (ret)
479 goto rereg_phys_mr_exit1;
480
481 /* successful reregistration */
482 if (mr_rereg_mask & IB_MR_REREG_PD)
483 mr->pd = pd;
484 mr->lkey = tmp_lkey;
485 mr->rkey = tmp_rkey;
486
487rereg_phys_mr_exit1:
488 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
489rereg_phys_mr_exit0:
490 if (ret)
491 ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
492 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
493 "iova_start=%p",
494 ret, mr, mr_rereg_mask, pd, phys_buf_array,
495 num_phys_buf, mr_access_flags, iova_start);
496 return ret;
497} /* end ehca_rereg_phys_mr() */
498
499/*----------------------------------------------------------------------*/
500
501int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
502{
503 int ret = 0;
504 u64 h_ret;
505 struct ehca_shca *shca =
506 container_of(mr->device, struct ehca_shca, ib_device);
507 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
508 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
509 u32 cur_pid = current->tgid;
510 unsigned long sl_flags;
511 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
512
513 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
514 (my_pd->ownpid != cur_pid)) {
515 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
516 cur_pid, my_pd->ownpid);
517 ret = -EINVAL;
518 goto query_mr_exit0;
519 }
520
521 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
522 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
523 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
524 ret = -EINVAL;
525 goto query_mr_exit0;
526 }
527
528 memset(mr_attr, 0, sizeof(struct ib_mr_attr));
529 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
530
531 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
532 if (h_ret != H_SUCCESS) {
533 ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
534 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
535 h_ret, mr, shca->ipz_hca_handle.handle,
536 e_mr->ipz_mr_handle.handle, mr->lkey);
537 ret = ehca_mrmw_map_hrc_query_mr(h_ret);
538 goto query_mr_exit1;
539 }
540 mr_attr->pd = mr->pd;
541 mr_attr->device_virt_addr = hipzout.vaddr;
542 mr_attr->size = hipzout.len;
543 mr_attr->lkey = hipzout.lkey;
544 mr_attr->rkey = hipzout.rkey;
545 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
546
547query_mr_exit1:
548 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
549query_mr_exit0:
550 if (ret)
551 ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
552 ret, mr, mr_attr);
553 return ret;
554} /* end ehca_query_mr() */
555
556/*----------------------------------------------------------------------*/
557
558int ehca_dereg_mr(struct ib_mr *mr)
559{
560 int ret = 0;
561 u64 h_ret;
562 struct ehca_shca *shca =
563 container_of(mr->device, struct ehca_shca, ib_device);
564 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
565 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
566 u32 cur_pid = current->tgid;
567
568 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
569 (my_pd->ownpid != cur_pid)) {
570 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
571 cur_pid, my_pd->ownpid);
572 ret = -EINVAL;
573 goto dereg_mr_exit0;
574 }
575
576 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
577 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
578 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
579 ret = -EINVAL;
580 goto dereg_mr_exit0;
581 } else if (e_mr == shca->maxmr) {
582 /* should be impossible, however reject to be sure */
583 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
584 "shca->maxmr=%p mr->lkey=%x",
585 mr, shca->maxmr, mr->lkey);
586 ret = -EINVAL;
587 goto dereg_mr_exit0;
588 }
589
590 /* TODO: BUSY: MR still has bound window(s) */
591 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
592 if (h_ret != H_SUCCESS) {
593 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
594 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
595 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
596 e_mr->ipz_mr_handle.handle, mr->lkey);
597 ret = ehca_mrmw_map_hrc_free_mr(h_ret);
598 goto dereg_mr_exit0;
599 }
600
601 /* successful deregistration */
602 ehca_mr_delete(e_mr);
603
604dereg_mr_exit0:
605 if (ret)
606 ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
607 return ret;
608} /* end ehca_dereg_mr() */
609
610/*----------------------------------------------------------------------*/
611
612struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
613{
614 struct ib_mw *ib_mw;
615 u64 h_ret;
616 struct ehca_mw *e_mw;
617 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
618 struct ehca_shca *shca =
619 container_of(pd->device, struct ehca_shca, ib_device);
620 struct ehca_mw_hipzout_parms hipzout = {{0},0};
621
622 e_mw = ehca_mw_new();
623 if (!e_mw) {
624 ib_mw = ERR_PTR(-ENOMEM);
625 goto alloc_mw_exit0;
626 }
627
628 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
629 e_pd->fw_pd, &hipzout);
630 if (h_ret != H_SUCCESS) {
631 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
632 "shca=%p hca_hndl=%lx mw=%p",
633 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
634 ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret));
635 goto alloc_mw_exit1;
636 }
637 /* successful MW allocation */
638 e_mw->ipz_mw_handle = hipzout.handle;
639 e_mw->ib_mw.rkey = hipzout.rkey;
640 return &e_mw->ib_mw;
641
642alloc_mw_exit1:
643 ehca_mw_delete(e_mw);
644alloc_mw_exit0:
645 if (IS_ERR(ib_mw))
646 ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
647 return ib_mw;
648} /* end ehca_alloc_mw() */
649
650/*----------------------------------------------------------------------*/
651
652int ehca_bind_mw(struct ib_qp *qp,
653 struct ib_mw *mw,
654 struct ib_mw_bind *mw_bind)
655{
656 /* TODO: not supported up to now */
657 ehca_gen_err("bind MW currently not supported by HCAD");
658
659 return -EPERM;
660} /* end ehca_bind_mw() */
661
662/*----------------------------------------------------------------------*/
663
664int ehca_dealloc_mw(struct ib_mw *mw)
665{
666 u64 h_ret;
667 struct ehca_shca *shca =
668 container_of(mw->device, struct ehca_shca, ib_device);
669 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
670
671 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
672 if (h_ret != H_SUCCESS) {
673 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
674 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
675 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
676 e_mw->ipz_mw_handle.handle);
677 return ehca_mrmw_map_hrc_free_mw(h_ret);
678 }
679 /* successful deallocation */
680 ehca_mw_delete(e_mw);
681 return 0;
682} /* end ehca_dealloc_mw() */
683
684/*----------------------------------------------------------------------*/
685
686struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
687 int mr_access_flags,
688 struct ib_fmr_attr *fmr_attr)
689{
690 struct ib_fmr *ib_fmr;
691 struct ehca_shca *shca =
692 container_of(pd->device, struct ehca_shca, ib_device);
693 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
694 struct ehca_mr *e_fmr;
695 int ret;
696 u32 tmp_lkey, tmp_rkey;
697 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
698
699 /* check other parameters */
700 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
701 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
702 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
703 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
704 /*
705 * Remote Write Access requires Local Write Access
706 * Remote Atomic Access requires Local Write Access
707 */
708 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
709 mr_access_flags);
710 ib_fmr = ERR_PTR(-EINVAL);
711 goto alloc_fmr_exit0;
712 }
713 if (mr_access_flags & IB_ACCESS_MW_BIND) {
714 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
715 mr_access_flags);
716 ib_fmr = ERR_PTR(-EINVAL);
717 goto alloc_fmr_exit0;
718 }
719 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
720 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
721 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
722 fmr_attr->max_pages, fmr_attr->max_maps,
723 fmr_attr->page_shift);
724 ib_fmr = ERR_PTR(-EINVAL);
725 goto alloc_fmr_exit0;
726 }
727 if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
728 ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
729 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
730 fmr_attr->page_shift);
731 ib_fmr = ERR_PTR(-EINVAL);
732 goto alloc_fmr_exit0;
733 }
734
735 e_fmr = ehca_mr_new();
736 if (!e_fmr) {
737 ib_fmr = ERR_PTR(-ENOMEM);
738 goto alloc_fmr_exit0;
739 }
740 e_fmr->flags |= EHCA_MR_FLAG_FMR;
741
742 /* register MR on HCA */
743 ret = ehca_reg_mr(shca, e_fmr, NULL,
744 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
745 mr_access_flags, e_pd, &pginfo,
746 &tmp_lkey, &tmp_rkey);
747 if (ret) {
748 ib_fmr = ERR_PTR(ret);
749 goto alloc_fmr_exit1;
750 }
751
752 /* successful */
753 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
754 e_fmr->fmr_max_pages = fmr_attr->max_pages;
755 e_fmr->fmr_max_maps = fmr_attr->max_maps;
756 e_fmr->fmr_map_cnt = 0;
757 return &e_fmr->ib.ib_fmr;
758
759alloc_fmr_exit1:
760 ehca_mr_delete(e_fmr);
761alloc_fmr_exit0:
762 if (IS_ERR(ib_fmr))
763 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
764 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
765 mr_access_flags, fmr_attr);
766 return ib_fmr;
767} /* end ehca_alloc_fmr() */
768
769/*----------------------------------------------------------------------*/
770
771int ehca_map_phys_fmr(struct ib_fmr *fmr,
772 u64 *page_list,
773 int list_len,
774 u64 iova)
775{
776 int ret;
777 struct ehca_shca *shca =
778 container_of(fmr->device, struct ehca_shca, ib_device);
779 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
780 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
781 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
782 u32 tmp_lkey, tmp_rkey;
783
784 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
785 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
786 e_fmr, e_fmr->flags);
787 ret = -EINVAL;
788 goto map_phys_fmr_exit0;
789 }
790 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
791 if (ret)
792 goto map_phys_fmr_exit0;
793 if (iova % e_fmr->fmr_page_size) {
794 /* only whole-numbered pages */
795 ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
796 iova, e_fmr->fmr_page_size);
797 ret = -EINVAL;
798 goto map_phys_fmr_exit0;
799 }
800 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
801 /* HCAD does not limit the maps, however trace this anyway */
802 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
803 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
804 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
805 }
806
807 pginfo.type = EHCA_MR_PGI_FMR;
808 pginfo.num_pages = list_len;
809 pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
810 pginfo.page_list = page_list;
811 pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
812 EHCA_PAGESIZE);
813
814 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
815 list_len * e_fmr->fmr_page_size,
816 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
817 if (ret)
818 goto map_phys_fmr_exit0;
819
820 /* successful reregistration */
821 e_fmr->fmr_map_cnt++;
822 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
823 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
824 return 0;
825
826map_phys_fmr_exit0:
827 if (ret)
828 ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
829 "iova=%lx",
830 ret, fmr, page_list, list_len, iova);
831 return ret;
832} /* end ehca_map_phys_fmr() */
833
834/*----------------------------------------------------------------------*/
835
836int ehca_unmap_fmr(struct list_head *fmr_list)
837{
838 int ret = 0;
839 struct ib_fmr *ib_fmr;
840 struct ehca_shca *shca = NULL;
841 struct ehca_shca *prev_shca;
842 struct ehca_mr *e_fmr;
843 u32 num_fmr = 0;
844 u32 unmap_fmr_cnt = 0;
845
846	/* check that all FMRs belong to the same SHCA, and check internal flag */
847 list_for_each_entry(ib_fmr, fmr_list, list) {
848 prev_shca = shca;
849 if (!ib_fmr) {
850 ehca_gen_err("bad fmr=%p in list", ib_fmr);
851 ret = -EINVAL;
852 goto unmap_fmr_exit0;
853 }
854 shca = container_of(ib_fmr->device, struct ehca_shca,
855 ib_device);
856 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
857 if ((shca != prev_shca) && prev_shca) {
858 ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
859 "prev_shca=%p e_fmr=%p",
860 shca, prev_shca, e_fmr);
861 ret = -EINVAL;
862 goto unmap_fmr_exit0;
863 }
864 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
865 ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
866 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
867 ret = -EINVAL;
868 goto unmap_fmr_exit0;
869 }
870 num_fmr++;
871 }
872
873 /* loop over all FMRs to unmap */
874 list_for_each_entry(ib_fmr, fmr_list, list) {
875 unmap_fmr_cnt++;
876 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
877 shca = container_of(ib_fmr->device, struct ehca_shca,
878 ib_device);
879 ret = ehca_unmap_one_fmr(shca, e_fmr);
880 if (ret) {
881 /* unmap failed, stop unmapping of rest of FMRs */
882 ehca_err(&shca->ib_device, "unmap of one FMR failed, "
883 "stop rest, e_fmr=%p num_fmr=%x "
884 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
885 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
886 goto unmap_fmr_exit0;
887 }
888 }
889
890unmap_fmr_exit0:
891 if (ret)
892 ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
893 ret, fmr_list, num_fmr, unmap_fmr_cnt);
894 return ret;
895} /* end ehca_unmap_fmr() */
896
897/*----------------------------------------------------------------------*/
898
899int ehca_dealloc_fmr(struct ib_fmr *fmr)
900{
901 int ret;
902 u64 h_ret;
903 struct ehca_shca *shca =
904 container_of(fmr->device, struct ehca_shca, ib_device);
905 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
906
907 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
908 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
909 e_fmr, e_fmr->flags);
910 ret = -EINVAL;
911 goto free_fmr_exit0;
912 }
913
914 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
915 if (h_ret != H_SUCCESS) {
916 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
917 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
918 h_ret, e_fmr, shca->ipz_hca_handle.handle,
919 e_fmr->ipz_mr_handle.handle, fmr->lkey);
920 ret = ehca_mrmw_map_hrc_free_mr(h_ret);
921 goto free_fmr_exit0;
922 }
923 /* successful deregistration */
924 ehca_mr_delete(e_fmr);
925 return 0;
926
927free_fmr_exit0:
928 if (ret)
929 ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
930 return ret;
931} /* end ehca_dealloc_fmr() */
932
933/*----------------------------------------------------------------------*/
934
935int ehca_reg_mr(struct ehca_shca *shca,
936 struct ehca_mr *e_mr,
937 u64 *iova_start,
938 u64 size,
939 int acl,
940 struct ehca_pd *e_pd,
941 struct ehca_mr_pginfo *pginfo,
942 u32 *lkey, /*OUT*/
943 u32 *rkey) /*OUT*/
944{
945 int ret;
946 u64 h_ret;
947 u32 hipz_acl;
948 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
949
950 ehca_mrmw_map_acl(acl, &hipz_acl);
951 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
952 if (ehca_use_hp_mr == 1)
953 hipz_acl |= 0x00000001;
954
955 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
956 (u64)iova_start, size, hipz_acl,
957 e_pd->fw_pd, &hipzout);
958 if (h_ret != H_SUCCESS) {
959 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
960 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
961 ret = ehca_mrmw_map_hrc_alloc(h_ret);
962 goto ehca_reg_mr_exit0;
963 }
964
965 e_mr->ipz_mr_handle = hipzout.handle;
966
967 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
968 if (ret)
969 goto ehca_reg_mr_exit1;
970
971 /* successful registration */
972 e_mr->num_pages = pginfo->num_pages;
973 e_mr->num_4k = pginfo->num_4k;
974 e_mr->start = iova_start;
975 e_mr->size = size;
976 e_mr->acl = acl;
977 *lkey = hipzout.lkey;
978 *rkey = hipzout.rkey;
979 return 0;
980
981ehca_reg_mr_exit1:
982 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
983 if (h_ret != H_SUCCESS) {
984 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
985 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
986 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
987 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
988 hipzout.lkey, pginfo, pginfo->num_pages,
989 pginfo->num_4k, ret);
990 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
991 "not recoverable");
992 }
993ehca_reg_mr_exit0:
994 if (ret)
995 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
996 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
997 "num_pages=%lx num_4k=%lx",
998 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
999 pginfo->num_pages, pginfo->num_4k);
1000 return ret;
1001} /* end ehca_reg_mr() */
1002
1003/*----------------------------------------------------------------------*/
1004
1005int ehca_reg_mr_rpages(struct ehca_shca *shca,
1006 struct ehca_mr *e_mr,
1007 struct ehca_mr_pginfo *pginfo)
1008{
1009 int ret = 0;
1010 u64 h_ret;
1011 u32 rnum;
1012 u64 rpage;
1013 u32 i;
1014 u64 *kpage;
1015
1016 kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
1017 if (!kpage) {
1018 ehca_err(&shca->ib_device, "kpage alloc failed");
1019 ret = -ENOMEM;
1020 goto ehca_reg_mr_rpages_exit0;
1021 }
1022
1023 /* max 512 pages per shot */
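	/*
	 * Illustrative arithmetic, not part of the original patch: for
	 * pginfo->num_4k = 1300 the loop below issues three hCalls with
	 * rnum = 512, 512 and 276 (1300 % 512); for num_4k = 1024 the last
	 * shot is full, so rnum stays 512.
	 */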
1024 for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {
1025
1026 if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
1027 rnum = pginfo->num_4k % 512; /* last shot */
1028 if (rnum == 0)
1029 rnum = 512; /* last shot is full */
1030 } else
1031 rnum = 512;
1032
1033 if (rnum > 1) {
1034 ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
1035 if (ret) {
1036 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
1037 "bad rc, ret=%x rnum=%x kpage=%p",
1038 ret, rnum, kpage);
1039 ret = -EFAULT;
1040 goto ehca_reg_mr_rpages_exit1;
1041 }
1042 rpage = virt_to_abs(kpage);
1043 if (!rpage) {
1044 ehca_err(&shca->ib_device, "kpage=%p i=%x",
1045 kpage, i);
1046 ret = -EFAULT;
1047 goto ehca_reg_mr_rpages_exit1;
1048 }
1049 } else { /* rnum==1 */
1050 ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
1051 if (ret) {
1052 ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
1053 "bad rc, ret=%x i=%x", ret, i);
1054 ret = -EFAULT;
1055 goto ehca_reg_mr_rpages_exit1;
1056 }
1057 }
1058
1059 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
1060 0, /* pagesize 4k */
1061 0, rpage, rnum);
1062
1063 if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
1064 /*
1065 * check for 'registration complete'==H_SUCCESS
1066 * and for 'page registered'==H_PAGE_REGISTERED
1067 */
1068 if (h_ret != H_SUCCESS) {
1069 ehca_err(&shca->ib_device, "last "
1070 "hipz_reg_rpage_mr failed, h_ret=%lx "
1071 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
1072 " lkey=%x", h_ret, e_mr, i,
1073 shca->ipz_hca_handle.handle,
1074 e_mr->ipz_mr_handle.handle,
1075 e_mr->ib.ib_mr.lkey);
1076 ret = ehca_mrmw_map_hrc_rrpg_last(h_ret);
1077 break;
1078 } else
1079 ret = 0;
1080 } else if (h_ret != H_PAGE_REGISTERED) {
1081 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
1082 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
1083 "mr_hndl=%lx", h_ret, e_mr, i,
1084 e_mr->ib.ib_mr.lkey,
1085 shca->ipz_hca_handle.handle,
1086 e_mr->ipz_mr_handle.handle);
1087 ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret);
1088 break;
1089 } else
1090 ret = 0;
1091 } /* end for(i) */
1092
1093
1094ehca_reg_mr_rpages_exit1:
1095 kfree(kpage);
1096ehca_reg_mr_rpages_exit0:
1097 if (ret)
1098 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
1099 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
1100 pginfo->num_pages, pginfo->num_4k);
1101 return ret;
1102} /* end ehca_reg_mr_rpages() */
1103
1104/*----------------------------------------------------------------------*/
1105
1106inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1107 struct ehca_mr *e_mr,
1108 u64 *iova_start,
1109 u64 size,
1110 u32 acl,
1111 struct ehca_pd *e_pd,
1112 struct ehca_mr_pginfo *pginfo,
1113 u32 *lkey, /*OUT*/
1114 u32 *rkey) /*OUT*/
1115{
1116 int ret;
1117 u64 h_ret;
1118 u32 hipz_acl;
1119 u64 *kpage;
1120 u64 rpage;
1121 struct ehca_mr_pginfo pginfo_save;
1122 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1123
1124 ehca_mrmw_map_acl(acl, &hipz_acl);
1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1126
1127 kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
1128 if (!kpage) {
1129 ehca_err(&shca->ib_device, "kpage alloc failed");
1130 ret = -ENOMEM;
1131 goto ehca_rereg_mr_rereg1_exit0;
1132 }
1133
1134 pginfo_save = *pginfo;
1135 ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
1136 if (ret) {
1137 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
1138 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
1139 e_mr, pginfo, pginfo->type, pginfo->num_pages,
1140 pginfo->num_4k,kpage);
1141 goto ehca_rereg_mr_rereg1_exit1;
1142 }
1143 rpage = virt_to_abs(kpage);
1144 if (!rpage) {
1145 ehca_err(&shca->ib_device, "kpage=%p", kpage);
1146 ret = -EFAULT;
1147 goto ehca_rereg_mr_rereg1_exit1;
1148 }
1149 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
1150 (u64)iova_start, size, hipz_acl,
1151 e_pd->fw_pd, rpage, &hipzout);
1152 if (h_ret != H_SUCCESS) {
1153 /*
1154 * reregistration unsuccessful, try it again with the 3 hCalls,
1155		 * e.g. this is required in case of H_MR_CONDITION
1156 * (MW bound or MR is shared)
1157 */
1158 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
1159 "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
1160 *pginfo = pginfo_save;
1161 ret = -EAGAIN;
1162 } else if ((u64*)hipzout.vaddr != iova_start) {
1163 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
1164 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
1165 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
1166 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
1167 e_mr->ib.ib_mr.lkey, hipzout.lkey);
1168 ret = -EFAULT;
1169 } else {
1170 /*
1171 * successful reregistration
1172 * note: start and start_out are identical for eServer HCAs
1173 */
1174 e_mr->num_pages = pginfo->num_pages;
1175 e_mr->num_4k = pginfo->num_4k;
1176 e_mr->start = iova_start;
1177 e_mr->size = size;
1178 e_mr->acl = acl;
1179 *lkey = hipzout.lkey;
1180 *rkey = hipzout.rkey;
1181 }
1182
1183ehca_rereg_mr_rereg1_exit1:
1184 kfree(kpage);
1185ehca_rereg_mr_rereg1_exit0:
1186 if ( ret && (ret != -EAGAIN) )
1187 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
1188 "pginfo=%p num_pages=%lx num_4k=%lx",
1189 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
1190 pginfo->num_4k);
1191 return ret;
1192} /* end ehca_rereg_mr_rereg1() */
1193
1194/*----------------------------------------------------------------------*/
1195
1196int ehca_rereg_mr(struct ehca_shca *shca,
1197 struct ehca_mr *e_mr,
1198 u64 *iova_start,
1199 u64 size,
1200 int acl,
1201 struct ehca_pd *e_pd,
1202 struct ehca_mr_pginfo *pginfo,
1203 u32 *lkey,
1204 u32 *rkey)
1205{
1206 int ret = 0;
1207 u64 h_ret;
1208 int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1209 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
1210
1211 /* first determine reregistration hCall(s) */
1212 if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
1213 (pginfo->num_4k > e_mr->num_4k)) {
1214 ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
1215 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
1216 rereg_1_hcall = 0;
1217 rereg_3_hcall = 1;
1218 }
1219
1220 if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
1221 rereg_1_hcall = 0;
1222 rereg_3_hcall = 1;
1223 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
1224 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
1225 e_mr);
1226 }
1227
1228 if (rereg_1_hcall) {
1229 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
1230 acl, e_pd, pginfo, lkey, rkey);
1231 if (ret) {
1232 if (ret == -EAGAIN)
1233 rereg_3_hcall = 1;
1234 else
1235 goto ehca_rereg_mr_exit0;
1236 }
1237 }
1238
1239 if (rereg_3_hcall) {
1240 struct ehca_mr save_mr;
1241
1242 /* first deregister old MR */
1243 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1244 if (h_ret != H_SUCCESS) {
1245 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1246 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
1247 "mr->lkey=%x",
1248 h_ret, e_mr, shca->ipz_hca_handle.handle,
1249 e_mr->ipz_mr_handle.handle,
1250 e_mr->ib.ib_mr.lkey);
1251 ret = ehca_mrmw_map_hrc_free_mr(h_ret);
1252 goto ehca_rereg_mr_exit0;
1253 }
1254 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1255 save_mr = *e_mr;
1256 ehca_mr_deletenew(e_mr);
1257
1258 /* set some MR values */
1259 e_mr->flags = save_mr.flags;
1260 e_mr->fmr_page_size = save_mr.fmr_page_size;
1261 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1262 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1263 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1264
1265 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1266 e_pd, pginfo, lkey, rkey);
1267 if (ret) {
1268 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1269 memcpy(&e_mr->flags, &(save_mr.flags),
1270 sizeof(struct ehca_mr) - offset);
1271 goto ehca_rereg_mr_exit0;
1272 }
1273 }
1274
1275ehca_rereg_mr_exit0:
1276 if (ret)
1277 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1278 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1279 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
1280 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1281 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
1282 rereg_1_hcall, rereg_3_hcall);
1283 return ret;
1284} /* end ehca_rereg_mr() */
1285
1286/*----------------------------------------------------------------------*/
1287
1288int ehca_unmap_one_fmr(struct ehca_shca *shca,
1289 struct ehca_mr *e_fmr)
1290{
1291 int ret = 0;
1292 u64 h_ret;
1293 int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
1294 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
1295 struct ehca_pd *e_pd =
1296 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1297 struct ehca_mr save_fmr;
1298 u32 tmp_lkey, tmp_rkey;
1299 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
1300 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1301
1302 /* first check if reregistration hCall can be used for unmap */
1303 if (e_fmr->fmr_max_pages > 512) {
1304 rereg_1_hcall = 0;
1305 rereg_3_hcall = 1;
1306 }
1307
1308 if (rereg_1_hcall) {
1309 /*
1310 * note: after using rereg hcall with len=0,
1311 * rereg hcall must be used again for registering pages
1312 */
1313 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1314 0, 0, e_pd->fw_pd, 0, &hipzout);
1315 if (h_ret != H_SUCCESS) {
1316 /*
1317 * should not happen, because length checked above,
1318 * FMRs are not shared and no MW bound to FMRs
1319 */
1320 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1321 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
1322 "mr_hndl=%lx lkey=%x lkey_out=%x",
1323 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1324 e_fmr->ipz_mr_handle.handle,
1325 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1326 rereg_3_hcall = 1;
1327 } else {
1328 /* successful reregistration */
1329 e_fmr->start = NULL;
1330 e_fmr->size = 0;
1331 tmp_lkey = hipzout.lkey;
1332 tmp_rkey = hipzout.rkey;
1333 }
1334 }
1335
1336 if (rereg_3_hcall) {
1337
1338
1339 /* first free old FMR */
1340 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1341 if (h_ret != H_SUCCESS) {
1342 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1343 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
1344 "lkey=%x",
1345 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1346 e_fmr->ipz_mr_handle.handle,
1347 e_fmr->ib.ib_fmr.lkey);
1348 ret = ehca_mrmw_map_hrc_free_mr(h_ret);
1349 goto ehca_unmap_one_fmr_exit0;
1350 }
1351 /* clean ehca_mr_t, without changing lock */
1352 save_fmr = *e_fmr;
1353 ehca_mr_deletenew(e_fmr);
1354
1355 /* set some MR values */
1356 e_fmr->flags = save_fmr.flags;
1357 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1358 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1359 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1360 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1361 e_fmr->acl = save_fmr.acl;
1362
1363 pginfo.type = EHCA_MR_PGI_FMR;
1364 pginfo.num_pages = 0;
1365 pginfo.num_4k = 0;
1366 ret = ehca_reg_mr(shca, e_fmr, NULL,
1367 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1368 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1369 &tmp_rkey);
1370 if (ret) {
1371 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1372			memcpy(&e_fmr->flags, &(save_fmr.flags),
1373 sizeof(struct ehca_mr) - offset);
1374 goto ehca_unmap_one_fmr_exit0;
1375 }
1376 }
1377
1378ehca_unmap_one_fmr_exit0:
1379 if (ret)
1380 ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
1381 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
1382 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
1383 rereg_1_hcall, rereg_3_hcall);
1384 return ret;
1385} /* end ehca_unmap_one_fmr() */
1386
1387/*----------------------------------------------------------------------*/
1388
1389int ehca_reg_smr(struct ehca_shca *shca,
1390 struct ehca_mr *e_origmr,
1391 struct ehca_mr *e_newmr,
1392 u64 *iova_start,
1393 int acl,
1394 struct ehca_pd *e_pd,
1395 u32 *lkey, /*OUT*/
1396 u32 *rkey) /*OUT*/
1397{
1398 int ret = 0;
1399 u64 h_ret;
1400 u32 hipz_acl;
1401 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1402
1403 ehca_mrmw_map_acl(acl, &hipz_acl);
1404 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1405
1406 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1407 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1408 &hipzout);
1409 if (h_ret != H_SUCCESS) {
1410 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1411 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1412 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1413 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1414 shca->ipz_hca_handle.handle,
1415 e_origmr->ipz_mr_handle.handle,
1416 e_origmr->ib.ib_mr.lkey);
1417 ret = ehca_mrmw_map_hrc_reg_smr(h_ret);
1418 goto ehca_reg_smr_exit0;
1419 }
1420 /* successful registration */
1421 e_newmr->num_pages = e_origmr->num_pages;
1422 e_newmr->num_4k = e_origmr->num_4k;
1423 e_newmr->start = iova_start;
1424 e_newmr->size = e_origmr->size;
1425 e_newmr->acl = acl;
1426 e_newmr->ipz_mr_handle = hipzout.handle;
1427 *lkey = hipzout.lkey;
1428 *rkey = hipzout.rkey;
1429 return 0;
1430
1431ehca_reg_smr_exit0:
1432 if (ret)
1433 ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
1434 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1435 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1436 return ret;
1437} /* end ehca_reg_smr() */
1438
1439/*----------------------------------------------------------------------*/
1440
1441/* register internal max-MR to internal SHCA */
1442int ehca_reg_internal_maxmr(
1443 struct ehca_shca *shca,
1444 struct ehca_pd *e_pd,
1445 struct ehca_mr **e_maxmr) /*OUT*/
1446{
1447 int ret;
1448 struct ehca_mr *e_mr;
1449 u64 *iova_start;
1450 u64 size_maxmr;
1451 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
1452 struct ib_phys_buf ib_pbuf;
1453 u32 num_pages_mr;
1454 u32 num_pages_4k; /* 4k portion "pages" */
1455
1456 e_mr = ehca_mr_new();
1457 if (!e_mr) {
1458 ehca_err(&shca->ib_device, "out of memory");
1459 ret = -ENOMEM;
1460 goto ehca_reg_internal_maxmr_exit0;
1461 }
1462 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1463
1464 /* register internal max-MR on HCA */
1465 size_maxmr = (u64)high_memory - PAGE_OFFSET;
1466 iova_start = (u64*)KERNELBASE;
1467 ib_pbuf.addr = 0;
1468 ib_pbuf.size = size_maxmr;
1469 num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
1470 PAGE_SIZE - 1) / PAGE_SIZE);
1471 num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
1472 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
1473
1474 pginfo.type = EHCA_MR_PGI_PHYS;
1475 pginfo.num_pages = num_pages_mr;
1476 pginfo.num_4k = num_pages_4k;
1477 pginfo.num_phys_buf = 1;
1478 pginfo.phys_buf_array = &ib_pbuf;
1479
1480 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1481 &pginfo, &e_mr->ib.ib_mr.lkey,
1482 &e_mr->ib.ib_mr.rkey);
1483 if (ret) {
1484 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1485 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
1486 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
1487 num_pages_mr, num_pages_4k);
1488 goto ehca_reg_internal_maxmr_exit1;
1489 }
1490
1491 /* successful registration of all pages */
1492 e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1493 e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1494 e_mr->ib.ib_mr.uobject = NULL;
1495 atomic_inc(&(e_pd->ib_pd.usecnt));
1496 atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
1497 *e_maxmr = e_mr;
1498 return 0;
1499
1500ehca_reg_internal_maxmr_exit1:
1501 ehca_mr_delete(e_mr);
1502ehca_reg_internal_maxmr_exit0:
1503 if (ret)
1504 ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
1505 ret, shca, e_pd, e_maxmr);
1506 return ret;
1507} /* end ehca_reg_internal_maxmr() */
1508
1509/*----------------------------------------------------------------------*/
1510
1511int ehca_reg_maxmr(struct ehca_shca *shca,
1512 struct ehca_mr *e_newmr,
1513 u64 *iova_start,
1514 int acl,
1515 struct ehca_pd *e_pd,
1516 u32 *lkey,
1517 u32 *rkey)
1518{
1519 u64 h_ret;
1520 struct ehca_mr *e_origmr = shca->maxmr;
1521 u32 hipz_acl;
1522 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1523
1524 ehca_mrmw_map_acl(acl, &hipz_acl);
1525 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1526
1527 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1528 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1529 &hipzout);
1530 if (h_ret != H_SUCCESS) {
1531 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1532 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1533 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1534 e_origmr->ipz_mr_handle.handle,
1535 e_origmr->ib.ib_mr.lkey);
1536 return ehca_mrmw_map_hrc_reg_smr(h_ret);
1537 }
1538 /* successful registration */
1539 e_newmr->num_pages = e_origmr->num_pages;
1540 e_newmr->num_4k = e_origmr->num_4k;
1541 e_newmr->start = iova_start;
1542 e_newmr->size = e_origmr->size;
1543 e_newmr->acl = acl;
1544 e_newmr->ipz_mr_handle = hipzout.handle;
1545 *lkey = hipzout.lkey;
1546 *rkey = hipzout.rkey;
1547 return 0;
1548} /* end ehca_reg_maxmr() */
1549
1550/*----------------------------------------------------------------------*/
1551
1552int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1553{
1554 int ret;
1555 struct ehca_mr *e_maxmr;
1556 struct ib_pd *ib_pd;
1557
1558 if (!shca->maxmr) {
1559 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1560 ret = -EINVAL;
1561 goto ehca_dereg_internal_maxmr_exit0;
1562 }
1563
1564 e_maxmr = shca->maxmr;
1565 ib_pd = e_maxmr->ib.ib_mr.pd;
1566 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1567
1568 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1569 if (ret) {
1570 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1571 "ret=%x e_maxmr=%p shca=%p lkey=%x",
1572 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1573 shca->maxmr = e_maxmr;
1574 goto ehca_dereg_internal_maxmr_exit0;
1575 }
1576
1577 atomic_dec(&ib_pd->usecnt);
1578
1579ehca_dereg_internal_maxmr_exit0:
1580 if (ret)
1581 ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
1582 ret, shca, shca->maxmr);
1583 return ret;
1584} /* end ehca_dereg_internal_maxmr() */
1585
1586/*----------------------------------------------------------------------*/
1587
1588/*
1589 * check physical buffer array of MR verbs for validity and
1590 * calculate MR size
1591 */
1592int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1593 int num_phys_buf,
1594 u64 *iova_start,
1595 u64 *size)
1596{
1597 struct ib_phys_buf *pbuf = phys_buf_array;
1598 u64 size_count = 0;
1599 u32 i;
1600
1601 if (num_phys_buf == 0) {
1602 ehca_gen_err("bad phys buf array len, num_phys_buf=0");
1603 return -EINVAL;
1604 }
1605 /* check first buffer */
1606 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1607 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1608 "pbuf->addr=%lx pbuf->size=%lx",
1609 iova_start, pbuf->addr, pbuf->size);
1610 return -EINVAL;
1611 }
1612 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1613 (num_phys_buf > 1)) {
1614 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
1615 "pbuf->size=%lx", pbuf->addr, pbuf->size);
1616 return -EINVAL;
1617 }
1618
1619 for (i = 0; i < num_phys_buf; i++) {
1620 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1621 ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
1622 "pbuf->size=%lx",
1623 i, pbuf->addr, pbuf->size);
1624 return -EINVAL;
1625 }
1626 if (((i > 0) && /* not 1st */
1627 (i < (num_phys_buf - 1)) && /* not last */
1628 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1629 ehca_gen_err("bad size, i=%x pbuf->size=%lx",
1630 i, pbuf->size);
1631 return -EINVAL;
1632 }
1633 size_count += pbuf->size;
1634 pbuf++;
1635 }
1636
1637 *size = size_count;
1638 return 0;
1639} /* end ehca_mr_chk_buf_and_calc_size() */
1640
1641/*----------------------------------------------------------------------*/
1642
1643/* check page list of map FMR verb for validity */
1644int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1645 u64 *page_list,
1646 int list_len)
1647{
1648 u32 i;
1649 u64 *page;
1650
1651 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1652 ehca_gen_err("bad list_len, list_len=%x "
1653 "e_fmr->fmr_max_pages=%x fmr=%p",
1654 list_len, e_fmr->fmr_max_pages, e_fmr);
1655 return -EINVAL;
1656 }
1657
1658 /* each page must be aligned */
1659 page = page_list;
1660 for (i = 0; i < list_len; i++) {
1661 if (*page % e_fmr->fmr_page_size) {
1662 ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1663 "fmr_page_size=%x", i, *page, page, e_fmr,
1664 e_fmr->fmr_page_size);
1665 return -EINVAL;
1666 }
1667 page++;
1668 }
1669
1670 return 0;
1671} /* end ehca_fmr_check_page_list() */
1672
1673/*----------------------------------------------------------------------*/
1674
1675/* setup page buffer from page info */
1676int ehca_set_pagebuf(struct ehca_mr *e_mr,
1677 struct ehca_mr_pginfo *pginfo,
1678 u32 number,
1679 u64 *kpage)
1680{
1681 int ret = 0;
1682 struct ib_umem_chunk *prev_chunk;
1683 struct ib_umem_chunk *chunk;
1684 struct ib_phys_buf *pbuf;
1685 u64 *fmrlist;
1686 u64 num4k, pgaddr, offs4k;
1687 u32 i = 0;
1688 u32 j = 0;
1689
1690 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1691 /* loop over desired phys_buf_array entries */
1692 while (i < number) {
1693 pbuf = pginfo->phys_buf_array + pginfo->next_buf;
1694 num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
1695 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1696 offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1697 while (pginfo->next_4k < offs4k + num4k) {
1698 /* sanity check */
1699 if ((pginfo->page_cnt >= pginfo->num_pages) ||
1700 (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1701 ehca_gen_err("page_cnt >= num_pages, "
1702 "page_cnt=%lx "
1703 "num_pages=%lx "
1704 "page_4k_cnt=%lx "
1705 "num_4k=%lx i=%x",
1706 pginfo->page_cnt,
1707 pginfo->num_pages,
1708 pginfo->page_4k_cnt,
1709 pginfo->num_4k, i);
1710 ret = -EFAULT;
1711 goto ehca_set_pagebuf_exit0;
1712 }
1713 *kpage = phys_to_abs(
1714 (pbuf->addr & EHCA_PAGEMASK)
1715 + (pginfo->next_4k * EHCA_PAGESIZE));
1716 if ( !(*kpage) && pbuf->addr ) {
1717 ehca_gen_err("pbuf->addr=%lx "
1718 "pbuf->size=%lx "
1719 "next_4k=%lx", pbuf->addr,
1720 pbuf->size,
1721 pginfo->next_4k);
1722 ret = -EFAULT;
1723 goto ehca_set_pagebuf_exit0;
1724 }
1725 (pginfo->page_4k_cnt)++;
1726 (pginfo->next_4k)++;
1727 if (pginfo->next_4k %
1728 (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1729 (pginfo->page_cnt)++;
1730 kpage++;
1731 i++;
1732 if (i >= number) break;
1733 }
1734 if (pginfo->next_4k >= offs4k + num4k) {
1735 (pginfo->next_buf)++;
1736 pginfo->next_4k = 0;
1737 }
1738 }
1739 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1740 /* loop over desired chunk entries */
1741 chunk = pginfo->next_chunk;
1742 prev_chunk = pginfo->next_chunk;
1743 list_for_each_entry_continue(chunk,
1744 (&(pginfo->region->chunk_list)),
1745 list) {
1746 for (i = pginfo->next_nmap; i < chunk->nmap; ) {
1747 pgaddr = ( page_to_pfn(chunk->page_list[i].page)
1748 << PAGE_SHIFT );
1749 *kpage = phys_to_abs(pgaddr +
1750 (pginfo->next_4k *
1751 EHCA_PAGESIZE));
1752 if ( !(*kpage) ) {
1753 ehca_gen_err("pgaddr=%lx "
1754 "chunk->page_list[i]=%lx "
1755 "i=%x next_4k=%lx mr=%p",
1756 pgaddr,
1757 (u64)sg_dma_address(
1758 &chunk->
1759 page_list[i]),
1760 i, pginfo->next_4k, e_mr);
1761 ret = -EFAULT;
1762 goto ehca_set_pagebuf_exit0;
1763 }
1764 (pginfo->page_4k_cnt)++;
1765 (pginfo->next_4k)++;
1766 kpage++;
1767 if (pginfo->next_4k %
1768 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1769 (pginfo->page_cnt)++;
1770 (pginfo->next_nmap)++;
1771 pginfo->next_4k = 0;
1772 i++;
1773 }
1774 j++;
1775 if (j >= number) break;
1776 }
1777 if ((pginfo->next_nmap >= chunk->nmap) &&
1778 (j >= number)) {
1779 pginfo->next_nmap = 0;
1780 prev_chunk = chunk;
1781 break;
1782 } else if (pginfo->next_nmap >= chunk->nmap) {
1783 pginfo->next_nmap = 0;
1784 prev_chunk = chunk;
1785 } else if (j >= number)
1786 break;
1787 else
1788 prev_chunk = chunk;
1789 }
1790 pginfo->next_chunk =
1791 list_prepare_entry(prev_chunk,
1792 (&(pginfo->region->chunk_list)),
1793 list);
1794 } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1795 /* loop over desired page_list entries */
1796 fmrlist = pginfo->page_list + pginfo->next_listelem;
1797 for (i = 0; i < number; i++) {
1798 *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1799 pginfo->next_4k * EHCA_PAGESIZE);
1800 if ( !(*kpage) ) {
1801 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1802 "next_listelem=%lx next_4k=%lx",
1803 *fmrlist, fmrlist,
1804 pginfo->next_listelem,
1805 pginfo->next_4k);
1806 ret = -EFAULT;
1807 goto ehca_set_pagebuf_exit0;
1808 }
1809 (pginfo->page_4k_cnt)++;
1810 (pginfo->next_4k)++;
1811 kpage++;
1812 if (pginfo->next_4k %
1813 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1814 (pginfo->page_cnt)++;
1815 (pginfo->next_listelem)++;
1816 fmrlist++;
1817 pginfo->next_4k = 0;
1818 }
1819 }
1820 } else {
1821 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1822 ret = -EFAULT;
1823 goto ehca_set_pagebuf_exit0;
1824 }
1825
1826ehca_set_pagebuf_exit0:
1827 if (ret)
1828 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
1829 "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
1830 "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
1831 "next_listelem=%lx region=%p next_chunk=%p "
1832 "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
1833 pginfo->num_pages, pginfo->num_4k,
1834 pginfo->next_buf, pginfo->next_4k, number, kpage,
1835 pginfo->page_cnt, pginfo->page_4k_cnt, i,
1836 pginfo->next_listelem, pginfo->region,
1837 pginfo->next_chunk, pginfo->next_nmap);
1838 return ret;
1839} /* end ehca_set_pagebuf() */
1840
1841/*----------------------------------------------------------------------*/
1842
1843/* setup 1 page from page info page buffer */
1844int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
1845 struct ehca_mr_pginfo *pginfo,
1846 u64 *rpage)
1847{
1848 int ret = 0;
1849 struct ib_phys_buf *tmp_pbuf;
1850 u64 *fmrlist;
1851 struct ib_umem_chunk *chunk;
1852 struct ib_umem_chunk *prev_chunk;
1853 u64 pgaddr, num4k, offs4k;
1854
1855 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1856 /* sanity check */
1857 if ((pginfo->page_cnt >= pginfo->num_pages) ||
1858 (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1859 ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
1860 "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
1861 pginfo->page_cnt, pginfo->num_pages,
1862 pginfo->page_4k_cnt, pginfo->num_4k);
1863 ret = -EFAULT;
1864 goto ehca_set_pagebuf_1_exit0;
1865 }
1866 tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
1867 num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
1868 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1869 offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1870 *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
1871 (pginfo->next_4k * EHCA_PAGESIZE));
1872 if ( !(*rpage) && tmp_pbuf->addr ) {
1873 ehca_gen_err("tmp_pbuf->addr=%lx"
1874 " tmp_pbuf->size=%lx next_4k=%lx",
1875 tmp_pbuf->addr, tmp_pbuf->size,
1876 pginfo->next_4k);
1877 ret = -EFAULT;
1878 goto ehca_set_pagebuf_1_exit0;
1879 }
1880 (pginfo->page_4k_cnt)++;
1881 (pginfo->next_4k)++;
1882 if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1883 (pginfo->page_cnt)++;
1884 if (pginfo->next_4k >= offs4k + num4k) {
1885 (pginfo->next_buf)++;
1886 pginfo->next_4k = 0;
1887 }
1888 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1889 chunk = pginfo->next_chunk;
1890 prev_chunk = pginfo->next_chunk;
1891 list_for_each_entry_continue(chunk,
1892 (&(pginfo->region->chunk_list)),
1893 list) {
1894 pgaddr = ( page_to_pfn(chunk->page_list[
1895 pginfo->next_nmap].page)
1896 << PAGE_SHIFT);
1897 *rpage = phys_to_abs(pgaddr +
1898 (pginfo->next_4k * EHCA_PAGESIZE));
1899 if ( !(*rpage) ) {
1900 ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
1901 " next_nmap=%lx next_4k=%lx mr=%p",
1902 pgaddr, (u64)sg_dma_address(
1903 &chunk->page_list[
1904 pginfo->
1905 next_nmap]),
1906 pginfo->next_nmap, pginfo->next_4k,
1907 e_mr);
1908 ret = -EFAULT;
1909 goto ehca_set_pagebuf_1_exit0;
1910 }
1911 (pginfo->page_4k_cnt)++;
1912 (pginfo->next_4k)++;
1913 if (pginfo->next_4k %
1914 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1915 (pginfo->page_cnt)++;
1916 (pginfo->next_nmap)++;
1917 pginfo->next_4k = 0;
1918 }
1919 if (pginfo->next_nmap >= chunk->nmap) {
1920 pginfo->next_nmap = 0;
1921 prev_chunk = chunk;
1922 }
1923 break;
1924 }
1925 pginfo->next_chunk =
1926 list_prepare_entry(prev_chunk,
1927 (&(pginfo->region->chunk_list)),
1928 list);
1929 } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1930 fmrlist = pginfo->page_list + pginfo->next_listelem;
1931 *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1932 pginfo->next_4k * EHCA_PAGESIZE);
1933 if ( !(*rpage) ) {
1934 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1935 "next_listelem=%lx next_4k=%lx",
1936 *fmrlist, fmrlist, pginfo->next_listelem,
1937 pginfo->next_4k);
1938 ret = -EFAULT;
1939 goto ehca_set_pagebuf_1_exit0;
1940 }
1941 (pginfo->page_4k_cnt)++;
1942 (pginfo->next_4k)++;
1943 if (pginfo->next_4k %
1944 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1945 (pginfo->page_cnt)++;
1946 (pginfo->next_listelem)++;
1947 pginfo->next_4k = 0;
1948 }
1949 } else {
1950 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1951 ret = -EFAULT;
1952 goto ehca_set_pagebuf_1_exit0;
1953 }
1954
1955ehca_set_pagebuf_1_exit0:
1956 if (ret)
1957 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
1958 "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
1959 "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
1960 "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
1961 pginfo, pginfo->type, pginfo->num_pages,
1962 pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
1963 rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
1964 pginfo->next_listelem, pginfo->region,
1965 pginfo->next_chunk, pginfo->next_nmap);
1966 return ret;
1967} /* end ehca_set_pagebuf_1() */
1968
1969/*----------------------------------------------------------------------*/
1970
1971/*
1972 * check whether an MR is a max-MR, i.e. whether it covers all of kernel memory;
1973 * returns 1 if it is a max-MR, else 0
1974 */
1975int ehca_mr_is_maxmr(u64 size,
1976 u64 *iova_start)
1977{
1978	/* an MR is treated as a max-MR only if it meets the following: */
1979 if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
1980 (iova_start == (void*)KERNELBASE)) {
1981 ehca_gen_dbg("this is a max-MR");
1982 return 1;
1983 } else
1984 return 0;
1985} /* end ehca_mr_is_maxmr() */
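/*
 * Illustrative sketch (hypothetical helper, for documentation only): the
 * check above only matches a registration that starts at KERNELBASE and
 * spans all of kernel memory.
 */
static inline int example_maxmr_check_sketch(void)
{
	u64 maxmr_size = (u64)high_memory - PAGE_OFFSET;

	/* returns 1 for the full-memory registration, 0 otherwise */
	return ehca_mr_is_maxmr(maxmr_size, (u64 *)KERNELBASE);
}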
1986
1987/*----------------------------------------------------------------------*/
1988
1989/* map access control for MR/MW. This routine is used for MR and MW. */
1990void ehca_mrmw_map_acl(int ib_acl,
1991 u32 *hipz_acl)
1992{
1993 *hipz_acl = 0;
1994 if (ib_acl & IB_ACCESS_REMOTE_READ)
1995 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1996 if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1997 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1998 if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1999 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
2000 if (ib_acl & IB_ACCESS_LOCAL_WRITE)
2001 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
2002 if (ib_acl & IB_ACCESS_MW_BIND)
2003 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
2004} /* end ehca_mrmw_map_acl() */
2005
2006/*----------------------------------------------------------------------*/
2007
2008/* sets page size in hipz access control for MR/MW. */
2009void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
2010{
2011 return; /* HCA supports only 4k */
2012} /* end ehca_mrmw_set_pgsize_hipz_acl() */
2013
2014/*----------------------------------------------------------------------*/
2015
2016/*
2017 * reverse map access control for MR/MW.
2018 * This routine is used for MR and MW.
2019 */
2020void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
2021 int *ib_acl) /*OUT*/
2022{
2023 *ib_acl = 0;
2024 if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
2025 *ib_acl |= IB_ACCESS_REMOTE_READ;
2026 if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
2027 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
2028 if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
2029 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
2030 if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
2031 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
2032 if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
2033 *ib_acl |= IB_ACCESS_MW_BIND;
2034} /* end ehca_mrmw_reverse_map_acl() */
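/*
 * Round-trip sketch (hypothetical helper): mapping IB access flags to the
 * HIPZ access-control bits and back is expected to preserve every flag the
 * two routines above handle.
 */
static inline int example_acl_roundtrip_sketch(int ib_acl)
{
	u32 hipz_acl;
	int ib_acl_again;

	ehca_mrmw_map_acl(ib_acl, &hipz_acl);
	ehca_mrmw_reverse_map_acl(&hipz_acl, &ib_acl_again);

	return ib_acl_again == ib_acl;
}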
2035
2036
2037/*----------------------------------------------------------------------*/
2038
2039/*
2040 * map HIPZ rc to IB retcodes for MR/MW allocations
2041 * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
2042 */
2043int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc)
2044{
2045 switch (hipz_rc) {
2046 case H_SUCCESS: /* successful completion */
2047 return 0;
2048 case H_ADAPTER_PARM: /* invalid adapter handle */
2049 case H_RT_PARM: /* invalid resource type */
2050 case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
2051 case H_MLENGTH_PARM: /* invalid memory length */
2052 case H_MEM_ACCESS_PARM: /* invalid access controls */
2053 case H_CONSTRAINED: /* resource constraint */
2054 return -EINVAL;
2055 case H_BUSY: /* long busy */
2056 return -EBUSY;
2057 default:
2058 return -EINVAL;
2059 }
2060} /* end ehca_mrmw_map_hrc_alloc() */
2061
2062/*----------------------------------------------------------------------*/
2063
2064/*
2065 * map HIPZ rc to IB retcodes for MR register rpage
2066 * Used for hipz_h_register_rpage_mr when registering the last page
2067 */
2068int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc)
2069{
2070 switch (hipz_rc) {
2071 case H_SUCCESS: /* registration complete */
2072 return 0;
2073 case H_PAGE_REGISTERED: /* page registered */
2074 case H_ADAPTER_PARM: /* invalid adapter handle */
2075 case H_RH_PARM: /* invalid resource handle */
2076/* case H_QT_PARM: invalid queue type */
2077 case H_PARAMETER: /*
2078 * invalid logical address,
2079				  * or count zero or greater than 512
2080 */
2081 case H_TABLE_FULL: /* page table full */
2082 case H_HARDWARE: /* HCA not operational */
2083 return -EINVAL;
2084 case H_BUSY: /* long busy */
2085 return -EBUSY;
2086 default:
2087 return -EINVAL;
2088 }
2089} /* end ehca_mrmw_map_hrc_rrpg_last() */
2090
2091/*----------------------------------------------------------------------*/
2092
2093/*
2094 * map HIPZ rc to IB retcodes for MR register rpage
2095 * Used for hipz_h_register_rpage_mr when registering a page other than the last
2096 */
2097int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc)
2098{
2099 switch (hipz_rc) {
2100 case H_PAGE_REGISTERED: /* page registered */
2101 return 0;
2102 case H_SUCCESS: /* registration complete */
2103 case H_ADAPTER_PARM: /* invalid adapter handle */
2104 case H_RH_PARM: /* invalid resource handle */
2105/* case H_QT_PARM: invalid queue type */
2106 case H_PARAMETER: /*
2107 * invalid logical address,
2108				  * or count zero or greater than 512
2109 */
2110 case H_TABLE_FULL: /* page table full */
2111 case H_HARDWARE: /* HCA not operational */
2112 return -EINVAL;
2113 case H_BUSY: /* long busy */
2114 return -EBUSY;
2115 default:
2116 return -EINVAL;
2117 }
2118} /* end ehca_mrmw_map_hrc_rrpg_notlast() */
2119
2120/*----------------------------------------------------------------------*/
2121
2122/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query. */
2123int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc)
2124{
2125 switch (hipz_rc) {
2126 case H_SUCCESS: /* successful completion */
2127 return 0;
2128 case H_ADAPTER_PARM: /* invalid adapter handle */
2129 case H_RH_PARM: /* invalid resource handle */
2130 return -EINVAL;
2131 case H_BUSY: /* long busy */
2132 return -EBUSY;
2133 default:
2134 return -EINVAL;
2135 }
2136} /* end ehca_mrmw_map_hrc_query_mr() */
2137
2138/*----------------------------------------------------------------------*/
2139/*----------------------------------------------------------------------*/
2140
2141/*
2142 * map HIPZ rc to IB retcodes for freeing MR resource
2143 * Used for hipz_h_free_resource_mr
2144 */
2145int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc)
2146{
2147 switch (hipz_rc) {
2148 case H_SUCCESS: /* resource freed */
2149 return 0;
2150 case H_ADAPTER_PARM: /* invalid adapter handle */
2151 case H_RH_PARM: /* invalid resource handle */
2152 case H_R_STATE: /* invalid resource state */
2153 case H_HARDWARE: /* HCA not operational */
2154 return -EINVAL;
2155 case H_RESOURCE: /* Resource in use */
2156 case H_BUSY: /* long busy */
2157 return -EBUSY;
2158 default:
2159 return -EINVAL;
2160 }
2161} /* end ehca_mrmw_map_hrc_free_mr() */
2162
2163/*----------------------------------------------------------------------*/
2164
2165/*
2166 * map HIPZ rc to IB retcodes for freeing MW resource
2167 * Used for hipz_h_free_resource_mw
2168 */
2169int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc)
2170{
2171 switch (hipz_rc) {
2172 case H_SUCCESS: /* resource freed */
2173 return 0;
2174 case H_ADAPTER_PARM: /* invalid adapter handle */
2175 case H_RH_PARM: /* invalid resource handle */
2176 case H_R_STATE: /* invalid resource state */
2177 case H_HARDWARE: /* HCA not operational */
2178 return -EINVAL;
2179 case H_RESOURCE: /* Resource in use */
2180 case H_BUSY: /* long busy */
2181 return -EBUSY;
2182 default:
2183 return -EINVAL;
2184 }
2185} /* end ehca_mrmw_map_hrc_free_mw() */
2186
2187/*----------------------------------------------------------------------*/
2188
2189/*
2190 * map HIPZ rc to IB retcodes for SMR registrations
2191 * Used for hipz_h_register_smr.
2192 */
2193int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc)
2194{
2195 switch (hipz_rc) {
2196 case H_SUCCESS: /* successful completion */
2197 return 0;
2198 case H_ADAPTER_PARM: /* invalid adapter handle */
2199 case H_RH_PARM: /* invalid resource handle */
2200 case H_MEM_PARM: /* invalid MR virtual address */
2201 case H_MEM_ACCESS_PARM: /* invalid access controls */
2202 case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
2203 return -EINVAL;
2204 case H_BUSY: /* long busy */
2205 return -EBUSY;
2206 default:
2207 return -EINVAL;
2208 }
2209} /* end ehca_mrmw_map_hrc_reg_smr() */
2210
2211/*----------------------------------------------------------------------*/
2212
2213/*
2214 * MR destructor and constructor
2215 * used in the Reregister MR verb; sets all fields of struct ehca_mr to 0,
2216 * except struct ib_mr and the spinlock
2217 */
2218void ehca_mr_deletenew(struct ehca_mr *mr)
2219{
2220 mr->flags = 0;
2221 mr->num_pages = 0;
2222 mr->num_4k = 0;
2223 mr->acl = 0;
2224 mr->start = NULL;
2225 mr->fmr_page_size = 0;
2226 mr->fmr_max_pages = 0;
2227 mr->fmr_max_maps = 0;
2228 mr->fmr_map_cnt = 0;
2229 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
2230 memset(&mr->galpas, 0, sizeof(mr->galpas));
2231 mr->nr_of_pages = 0;
2232 mr->pagearray = NULL;
2233} /* end ehca_mr_deletenew() */
2234
2235int ehca_init_mrmw_cache(void)
2236{
2237 mr_cache = kmem_cache_create("ehca_cache_mr",
2238 sizeof(struct ehca_mr), 0,
2239 SLAB_HWCACHE_ALIGN,
2240 NULL, NULL);
2241 if (!mr_cache)
2242 return -ENOMEM;
2243 mw_cache = kmem_cache_create("ehca_cache_mw",
2244 sizeof(struct ehca_mw), 0,
2245 SLAB_HWCACHE_ALIGN,
2246 NULL, NULL);
2247 if (!mw_cache) {
2248 kmem_cache_destroy(mr_cache);
2249 mr_cache = NULL;
2250 return -ENOMEM;
2251 }
2252 return 0;
2253}
2254
2255void ehca_cleanup_mrmw_cache(void)
2256{
2257 if (mr_cache)
2258 kmem_cache_destroy(mr_cache);
2259 if (mw_cache)
2260 kmem_cache_destroy(mw_cache);
2261}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
new file mode 100644
index 000000000000..d936e40a5748
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -0,0 +1,140 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW declarations and inline functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef _EHCA_MRMW_H_
43#define _EHCA_MRMW_H_
44
45int ehca_reg_mr(struct ehca_shca *shca,
46 struct ehca_mr *e_mr,
47 u64 *iova_start,
48 u64 size,
49 int acl,
50 struct ehca_pd *e_pd,
51 struct ehca_mr_pginfo *pginfo,
52 u32 *lkey,
53 u32 *rkey);
54
55int ehca_reg_mr_rpages(struct ehca_shca *shca,
56 struct ehca_mr *e_mr,
57 struct ehca_mr_pginfo *pginfo);
58
59int ehca_rereg_mr(struct ehca_shca *shca,
60 struct ehca_mr *e_mr,
61 u64 *iova_start,
62 u64 size,
63 int mr_access_flags,
64 struct ehca_pd *e_pd,
65 struct ehca_mr_pginfo *pginfo,
66 u32 *lkey,
67 u32 *rkey);
68
69int ehca_unmap_one_fmr(struct ehca_shca *shca,
70 struct ehca_mr *e_fmr);
71
72int ehca_reg_smr(struct ehca_shca *shca,
73 struct ehca_mr *e_origmr,
74 struct ehca_mr *e_newmr,
75 u64 *iova_start,
76 int acl,
77 struct ehca_pd *e_pd,
78 u32 *lkey,
79 u32 *rkey);
80
81int ehca_reg_internal_maxmr(struct ehca_shca *shca,
82 struct ehca_pd *e_pd,
83 struct ehca_mr **maxmr);
84
85int ehca_reg_maxmr(struct ehca_shca *shca,
86 struct ehca_mr *e_newmr,
87 u64 *iova_start,
88 int acl,
89 struct ehca_pd *e_pd,
90 u32 *lkey,
91 u32 *rkey);
92
93int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
94
95int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
96 int num_phys_buf,
97 u64 *iova_start,
98 u64 *size);
99
100int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
101 u64 *page_list,
102 int list_len);
103
104int ehca_set_pagebuf(struct ehca_mr *e_mr,
105 struct ehca_mr_pginfo *pginfo,
106 u32 number,
107 u64 *kpage);
108
109int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
110 struct ehca_mr_pginfo *pginfo,
111 u64 *rpage);
112
113int ehca_mr_is_maxmr(u64 size,
114 u64 *iova_start);
115
116void ehca_mrmw_map_acl(int ib_acl,
117 u32 *hipz_acl);
118
119void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl);
120
121void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
122 int *ib_acl);
123
124int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc);
125
126int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc);
127
128int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc);
129
130int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc);
131
132int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc);
133
134int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc);
135
136int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc);
137
138void ehca_mr_deletenew(struct ehca_mr *mr);
139
140#endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c
new file mode 100644
index 000000000000..2c3cdc6f7b39
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_pd.c
@@ -0,0 +1,114 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * PD functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 *
8 * Copyright (c) 2005 IBM Corporation
9 *
10 * All rights reserved.
11 *
12 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
13 * BSD.
14 *
15 * OpenIB BSD License
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * Redistributions of source code must retain the above copyright notice, this
21 * list of conditions and the following disclaimer.
22 *
23 * Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following disclaimer in the documentation
25 * and/or other materials
26 * provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
36 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include <asm/current.h>
42
43#include "ehca_tools.h"
44#include "ehca_iverbs.h"
45
46static struct kmem_cache *pd_cache;
47
48struct ib_pd *ehca_alloc_pd(struct ib_device *device,
49 struct ib_ucontext *context, struct ib_udata *udata)
50{
51 struct ehca_pd *pd;
52
53 pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
54 if (!pd) {
55 ehca_err(device, "device=%p context=%p out of memory",
56 device, context);
57 return ERR_PTR(-ENOMEM);
58 }
59
60 memset(pd, 0, sizeof(struct ehca_pd));
61 pd->ownpid = current->tgid;
62
63 /*
64	 * Kernel PD: context == NULL
65	 * User PD: context != NULL
66 */
67 if (!context) {
68 /*
69		 * After init, kernel PDs always reuse
70		 * the one created in ehca_shca_reopen()
71 */
72 struct ehca_shca *shca = container_of(device, struct ehca_shca,
73 ib_device);
74 pd->fw_pd.value = shca->pd->fw_pd.value;
75 } else
76 pd->fw_pd.value = (u64)pd;
77
78 return &pd->ib_pd;
79}
80
81int ehca_dealloc_pd(struct ib_pd *pd)
82{
83 u32 cur_pid = current->tgid;
84 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
85
86 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
87 my_pd->ownpid != cur_pid) {
88 ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
89 cur_pid, my_pd->ownpid);
90 return -EINVAL;
91 }
92
93 kmem_cache_free(pd_cache,
94 container_of(pd, struct ehca_pd, ib_pd));
95
96 return 0;
97}
98
99int ehca_init_pd_cache(void)
100{
101 pd_cache = kmem_cache_create("ehca_cache_pd",
102 sizeof(struct ehca_pd), 0,
103 SLAB_HWCACHE_ALIGN,
104 NULL, NULL);
105 if (!pd_cache)
106 return -ENOMEM;
107 return 0;
108}
109
110void ehca_cleanup_pd_cache(void)
111{
112 if (pd_cache)
113 kmem_cache_destroy(pd_cache);
114}
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
new file mode 100644
index 000000000000..8707d297ce4c
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -0,0 +1,259 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Hardware request structures
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#ifndef _EHCA_QES_H_
45#define _EHCA_QES_H_
46
47#include "ehca_tools.h"
48
49/* virtual scatter gather entry to specify remote addresses with length */
50struct ehca_vsgentry {
51 u64 vaddr;
52 u32 lkey;
53 u32 length;
54};
55
56#define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7)
57#define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3)
58#define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12)
59#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31)
60#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47)
61#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55)
62#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63)
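
/*
 * Sketch (hypothetical values): the GRH word-0 masks above are meant to be
 * combined with EHCA_BMASK_SET() from ehca_tools.h, e.g. to mark a GRH as
 * present with IP version 6.
 */
static inline u64 example_grh_word0_sketch(void)
{
	return EHCA_BMASK_SET(GRH_FLAG_MASK, 1) |
	       EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
}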
63
64/*
65 * Unreliable Datagram Address Vector Format
66 * see IBTA Vol1 chapter 8.3 Global Routing Header
67 */
68struct ehca_ud_av {
69 u8 sl;
70 u8 lnh;
71 u16 dlid;
72 u8 reserved1;
73 u8 reserved2;
74 u8 reserved3;
75 u8 slid_path_bits;
76 u8 reserved4;
77 u8 ipd;
78 u8 reserved5;
79 u8 pmtu;
80 u32 reserved6;
81 u64 reserved7;
82 union {
83 struct {
84 u64 word_0; /* always set to 6 */
85			/* should be 0x1B for IB transport */
86 u64 word_1;
87 u64 word_2;
88 u64 word_3;
89 u64 word_4;
90 } grh;
91 struct {
92 u32 wd_0;
93 u32 wd_1;
94 /* DWord_1 --> SGID */
95
96 u32 sgid_wd3;
97 u32 sgid_wd2;
98
99 u32 sgid_wd1;
100 u32 sgid_wd0;
101 /* DWord_3 --> DGID */
102
103 u32 dgid_wd3;
104 u32 dgid_wd2;
105
106 u32 dgid_wd1;
107 u32 dgid_wd0;
108 } grh_l;
109 };
110};
111
112/* maximum number of sg entries allowed in a WQE */
113#define MAX_WQE_SG_ENTRIES 252
114
115#define WQE_OPTYPE_SEND 0x80
116#define WQE_OPTYPE_RDMAREAD 0x40
117#define WQE_OPTYPE_RDMAWRITE 0x20
118#define WQE_OPTYPE_CMPSWAP 0x10
119#define WQE_OPTYPE_FETCHADD 0x08
120#define WQE_OPTYPE_BIND 0x04
121
122#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80
123#define WQE_WRFLAG_FENCE 0x40
124#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
125#define WQE_WRFLAG_SOLIC_EVENT 0x10
126
127#define WQEF_CACHE_HINT 0x80
128#define WQEF_CACHE_HINT_RD_WR 0x40
129#define WQEF_TIMED_WQE 0x20
130#define WQEF_PURGE 0x08
131#define WQEF_HIGH_NIBBLE 0xF0
132
133#define MW_BIND_ACCESSCTRL_R_WRITE 0x40
134#define MW_BIND_ACCESSCTRL_R_READ 0x20
135#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10
136
137struct ehca_wqe {
138 u64 work_request_id;
139 u8 optype;
140 u8 wr_flag;
141 u16 pkeyi;
142 u8 wqef;
143 u8 nr_of_data_seg;
144 u16 wqe_provided_slid;
145 u32 destination_qp_number;
146 u32 resync_psn_sqp;
147 u32 local_ee_context_qkey;
148 u32 immediate_data;
149 union {
150 struct {
151 u64 remote_virtual_adress;
152 u32 rkey;
153 u32 reserved;
154 u64 atomic_1st_op_dma_len;
155 u64 atomic_2nd_op;
156 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
157
158 } nud;
159 struct {
160 u64 ehca_ud_av_ptr;
161 u64 reserved1;
162 u64 reserved2;
163 u64 reserved3;
164 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
165 } ud_avp;
166 struct {
167 struct ehca_ud_av ud_av;
168 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
169 2];
170 } ud_av;
171 struct {
172 u64 reserved0;
173 u64 reserved1;
174 u64 reserved2;
175 u64 reserved3;
176 struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
177 } all_rcv;
178
179 struct {
180 u64 reserved;
181 u32 rkey;
182 u32 old_rkey;
183 u64 reserved1;
184 u64 reserved2;
185 u64 virtual_address;
186 u32 reserved3;
187 u32 length;
188 u32 reserved4;
189 u16 reserved5;
190 u8 reserved6;
191 u8 lr_ctl;
192 u32 lkey;
193 u32 reserved7;
194 u64 reserved8;
195 u64 reserved9;
196 u64 reserved10;
197 u64 reserved11;
198 } bind;
199 struct {
200 u64 reserved12;
201 u64 reserved13;
202 u32 size;
203 u32 start;
204 } inline_data;
205 } u;
206
207};
208
209#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0)
210#define WC_IMM_DATA EHCA_BMASK_IBM(1,1)
211#define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2)
212#define WC_SE_BIT EHCA_BMASK_IBM(3,3)
213#define WC_STATUS_ERROR_BIT 0x80000000
214#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
215#define WC_STATUS_PURGE_BIT 0x10
216
217struct ehca_cqe {
218 u64 work_request_id;
219 u8 optype;
220 u8 w_completion_flags;
221 u16 reserved1;
222 u32 nr_bytes_transferred;
223 u32 immediate_data;
224 u32 local_qp_number;
225 u8 freed_resource_count;
226 u8 service_level;
227 u16 wqe_count;
228 u32 qp_token;
229 u32 qkey_ee_token;
230 u32 remote_qp_number;
231 u16 dlid;
232 u16 rlid;
233 u16 reserved2;
234 u16 pkey_index;
235 u32 cqe_timestamp;
236 u32 wqe_timestamp;
237 u8 wqe_timestamp_valid;
238 u8 reserved3;
239 u8 reserved4;
240 u8 cqe_flags;
241 u32 status;
242};
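
/*
 * Sketch (hypothetical helper): a CQE is treated as errored when the error
 * bit defined above is set in its status word.
 */
static inline int example_cqe_is_error_sketch(const struct ehca_cqe *cqe)
{
	return (cqe->status & WC_STATUS_ERROR_BIT) != 0;
}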
243
244struct ehca_eqe {
245 u64 entry;
246};
247
248struct ehca_mrte {
249 u64 starting_va;
250 u64 length; /* length of memory region in bytes*/
251 u32 pd;
252 u8 key_instance;
253 u8 pagesize;
254 u8 mr_control;
255 u8 local_remote_access_ctrl;
256 u8 reserved[0x20 - 0x18];
257 u64 at_pointer[4];
258};
259#endif /*_EHCA_QES_H_*/
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
new file mode 100644
index 000000000000..4394123cdbd7
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -0,0 +1,1507 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * QP functions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 * Heiko J Schick <schickhj@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44
45#include <asm/current.h>
46
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "ehca_qes.h"
50#include "ehca_iverbs.h"
51#include "hcp_if.h"
52#include "hipz_fns.h"
53
54static struct kmem_cache *qp_cache;
55
56/*
57 * attributes not supported by query qp
58 */
59#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
60 IB_QP_MAX_QP_RD_ATOMIC | \
61 IB_QP_ACCESS_FLAGS | \
62 IB_QP_EN_SQD_ASYNC_NOTIFY)
63
64/*
65 * ehca (internal) qp state values
66 */
67enum ehca_qp_state {
68 EHCA_QPS_RESET = 1,
69 EHCA_QPS_INIT = 2,
70 EHCA_QPS_RTR = 3,
71 EHCA_QPS_RTS = 5,
72 EHCA_QPS_SQD = 6,
73 EHCA_QPS_SQE = 8,
74 EHCA_QPS_ERR = 128
75};
76
77/*
78 * qp state transitions as defined by IB Arch Rel 1.1 page 431
79 */
80enum ib_qp_statetrans {
81 IB_QPST_ANY2RESET,
82 IB_QPST_ANY2ERR,
83 IB_QPST_RESET2INIT,
84 IB_QPST_INIT2RTR,
85 IB_QPST_INIT2INIT,
86 IB_QPST_RTR2RTS,
87 IB_QPST_RTS2SQD,
88 IB_QPST_RTS2RTS,
89 IB_QPST_SQD2RTS,
90 IB_QPST_SQE2RTS,
91 IB_QPST_SQD2SQD,
92 IB_QPST_MAX /* nr of transitions, this must be last!!! */
93};
94
95/*
96 * ib2ehca_qp_state maps IB to ehca qp_state
97 * returns ehca qp state corresponding to given ib qp state
98 */
99static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
100{
101 switch (ib_qp_state) {
102 case IB_QPS_RESET:
103 return EHCA_QPS_RESET;
104 case IB_QPS_INIT:
105 return EHCA_QPS_INIT;
106 case IB_QPS_RTR:
107 return EHCA_QPS_RTR;
108 case IB_QPS_RTS:
109 return EHCA_QPS_RTS;
110 case IB_QPS_SQD:
111 return EHCA_QPS_SQD;
112 case IB_QPS_SQE:
113 return EHCA_QPS_SQE;
114 case IB_QPS_ERR:
115 return EHCA_QPS_ERR;
116 default:
117 ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
118 return -EINVAL;
119 }
120}
121
122/*
123 * ehca2ib_qp_state maps ehca to IB qp_state
124 * returns ib qp state corresponding to given ehca qp state
125 */
126static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
127 ehca_qp_state)
128{
129 switch (ehca_qp_state) {
130 case EHCA_QPS_RESET:
131 return IB_QPS_RESET;
132 case EHCA_QPS_INIT:
133 return IB_QPS_INIT;
134 case EHCA_QPS_RTR:
135 return IB_QPS_RTR;
136 case EHCA_QPS_RTS:
137 return IB_QPS_RTS;
138 case EHCA_QPS_SQD:
139 return IB_QPS_SQD;
140 case EHCA_QPS_SQE:
141 return IB_QPS_SQE;
142 case EHCA_QPS_ERR:
143 return IB_QPS_ERR;
144 default:
145 ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
146 return -EINVAL;
147 }
148}
149
150/*
151 * ehca_qp_type used as index for req_attr and opt_attr of
152 * struct ehca_modqp_statetrans
153 */
154enum ehca_qp_type {
155 QPT_RC = 0,
156 QPT_UC = 1,
157 QPT_UD = 2,
158 QPT_SQP = 3,
159 QPT_MAX
160};
161
162/*
163 * ib2ehcaqptype maps IB to ehca qp_type
164 * returns ehca qp type corresponding to ib qp type
165 */
166static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
167{
168 switch (ibqptype) {
169 case IB_QPT_SMI:
170 case IB_QPT_GSI:
171 return QPT_SQP;
172 case IB_QPT_RC:
173 return QPT_RC;
174 case IB_QPT_UC:
175 return QPT_UC;
176 case IB_QPT_UD:
177 return QPT_UD;
178 default:
179 ehca_gen_err("Invalid ibqptype=%x", ibqptype);
180 return -EINVAL;
181 }
182}
183
184static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
185 int ib_tostate)
186{
187 int index = -EINVAL;
188 switch (ib_tostate) {
189 case IB_QPS_RESET:
190 index = IB_QPST_ANY2RESET;
191 break;
192 case IB_QPS_INIT:
193 switch (ib_fromstate) {
194 case IB_QPS_RESET:
195 index = IB_QPST_RESET2INIT;
196 break;
197 case IB_QPS_INIT:
198 index = IB_QPST_INIT2INIT;
199 break;
200 }
201 break;
202 case IB_QPS_RTR:
203 if (ib_fromstate == IB_QPS_INIT)
204 index = IB_QPST_INIT2RTR;
205 break;
206 case IB_QPS_RTS:
207 switch (ib_fromstate) {
208 case IB_QPS_RTR:
209 index = IB_QPST_RTR2RTS;
210 break;
211 case IB_QPS_RTS:
212 index = IB_QPST_RTS2RTS;
213 break;
214 case IB_QPS_SQD:
215 index = IB_QPST_SQD2RTS;
216 break;
217 case IB_QPS_SQE:
218 index = IB_QPST_SQE2RTS;
219 break;
220 }
221 break;
222 case IB_QPS_SQD:
223 if (ib_fromstate == IB_QPS_RTS)
224 index = IB_QPST_RTS2SQD;
225 break;
226 case IB_QPS_SQE:
227 break;
228 case IB_QPS_ERR:
229 index = IB_QPST_ANY2ERR;
230 break;
231 default:
232 break;
233 }
234 return index;
235}
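
/*
 * Usage sketch (hypothetical caller): resolving the RESET -> INIT
 * transition through the table above; unsupported transitions come back
 * as -EINVAL and must be rejected.
 */
static inline int example_statetrans_lookup_sketch(void)
{
	int idx = get_modqp_statetrans(IB_QPS_RESET, IB_QPS_INIT);

	if (idx < 0)
		return -EINVAL;	/* transition not covered by the table */

	return idx;		/* IB_QPST_RESET2INIT in this case */
}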
236
237enum ehca_service_type {
238 ST_RC = 0,
239 ST_UC = 1,
240 ST_RD = 2,
241 ST_UD = 3
242};
243
244/*
245 * ibqptype2servicetype returns hcp service type corresponding to given
246 * ib qp type used by create_qp()
247 */
248static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
249{
250 switch (ibqptype) {
251 case IB_QPT_SMI:
252 case IB_QPT_GSI:
253 return ST_UD;
254 case IB_QPT_RC:
255 return ST_RC;
256 case IB_QPT_UC:
257 return ST_UC;
258 case IB_QPT_UD:
259 return ST_UD;
260 case IB_QPT_RAW_IPV6:
261 return -EINVAL;
262 case IB_QPT_RAW_ETY:
263 return -EINVAL;
264 default:
265 ehca_gen_err("Invalid ibqptype=%x", ibqptype);
266 return -EINVAL;
267 }
268}
269
270/*
271 * init_qp_queues initializes/constructs r/squeue and registers queue pages.
272 */
273static inline int init_qp_queues(struct ehca_shca *shca,
274 struct ehca_qp *my_qp,
275 int nr_sq_pages,
276 int nr_rq_pages,
277 int swqe_size,
278 int rwqe_size,
279 int nr_send_sges, int nr_receive_sges)
280{
281 int ret, cnt, ipz_rc;
282 void *vpage;
283 u64 rpage, h_ret;
284 struct ib_device *ib_dev = &shca->ib_device;
285 struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
286
287 ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue,
288 nr_sq_pages,
289 EHCA_PAGESIZE, swqe_size, nr_send_sges);
290 if (!ipz_rc) {
291 ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x",
292 ipz_rc);
293 return -EBUSY;
294 }
295
296 ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue,
297 nr_rq_pages,
298 EHCA_PAGESIZE, rwqe_size, nr_receive_sges);
299 if (!ipz_rc) {
300 ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
301 ipz_rc);
302 ret = -EBUSY;
303 goto init_qp_queues0;
304 }
305 /* register SQ pages */
306 for (cnt = 0; cnt < nr_sq_pages; cnt++) {
307 vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
308 if (!vpage) {
309 ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() "
310 "failed p_vpage= %p", vpage);
311 ret = -EINVAL;
312 goto init_qp_queues1;
313 }
314 rpage = virt_to_abs(vpage);
315
316 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
317 my_qp->ipz_qp_handle,
318 &my_qp->pf, 0, 0,
319 rpage, 1,
320 my_qp->galpas.kernel);
321 if (h_ret < H_SUCCESS) {
322 ehca_err(ib_dev, "SQ hipz_qp_register_rpage()"
323 " failed rc=%lx", h_ret);
324 ret = ehca2ib_return_code(h_ret);
325 goto init_qp_queues1;
326 }
327 }
328
329 ipz_qeit_reset(&my_qp->ipz_squeue);
330
331 /* register RQ pages */
332 for (cnt = 0; cnt < nr_rq_pages; cnt++) {
333 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
334 if (!vpage) {
335 ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
336 "failed p_vpage = %p", vpage);
337 ret = -EINVAL;
338 goto init_qp_queues1;
339 }
340
341 rpage = virt_to_abs(vpage);
342
343 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
344 my_qp->ipz_qp_handle,
345 &my_qp->pf, 0, 1,
346 rpage, 1,my_qp->galpas.kernel);
347 if (h_ret < H_SUCCESS) {
348 ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
349 "rc=%lx", h_ret);
350 ret = ehca2ib_return_code(h_ret);
351 goto init_qp_queues1;
352 }
353 if (cnt == (nr_rq_pages - 1)) { /* last page! */
354 if (h_ret != H_SUCCESS) {
355 ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
356 "h_ret= %lx ", h_ret);
357 ret = ehca2ib_return_code(h_ret);
358 goto init_qp_queues1;
359 }
360 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
361 if (vpage) {
362 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
363 "should not succeed vpage=%p", vpage);
364 ret = -EINVAL;
365 goto init_qp_queues1;
366 }
367 } else {
368 if (h_ret != H_PAGE_REGISTERED) {
369 ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
370 "h_ret= %lx ", h_ret);
371 ret = ehca2ib_return_code(h_ret);
372 goto init_qp_queues1;
373 }
374 }
375 }
376
377 ipz_qeit_reset(&my_qp->ipz_rqueue);
378
379 return 0;
380
381init_qp_queues1:
382 ipz_queue_dtor(&my_qp->ipz_rqueue);
383init_qp_queues0:
384 ipz_queue_dtor(&my_qp->ipz_squeue);
385 return ret;
386}
387
388struct ib_qp *ehca_create_qp(struct ib_pd *pd,
389 struct ib_qp_init_attr *init_attr,
390 struct ib_udata *udata)
391{
392 static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
393 static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
394 struct ehca_qp *my_qp;
395 struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
396 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
397 ib_device);
398 struct ib_ucontext *context = NULL;
399 u64 h_ret;
400 int max_send_sge, max_recv_sge, ret;
401
402 /* h_call's out parameters */
403 struct ehca_alloc_qp_parms parms;
404 u32 swqe_size = 0, rwqe_size = 0;
405 u8 daqp_completion, isdaqp;
406 unsigned long flags;
407
408 if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
409 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
410		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
411 init_attr->sq_sig_type);
412 return ERR_PTR(-EINVAL);
413 }
414
415 /* save daqp completion bits */
416 daqp_completion = init_attr->qp_type & 0x60;
417 /* save daqp bit */
418 isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0;
419 init_attr->qp_type = init_attr->qp_type & 0x1F;
420
421 if (init_attr->qp_type != IB_QPT_UD &&
422 init_attr->qp_type != IB_QPT_SMI &&
423 init_attr->qp_type != IB_QPT_GSI &&
424 init_attr->qp_type != IB_QPT_UC &&
425 init_attr->qp_type != IB_QPT_RC) {
426 ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type);
427 return ERR_PTR(-EINVAL);
428 }
429 if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD)
430 && isdaqp) {
431 ehca_err(pd->device, "unsupported LL QP Type=%x",
432 init_attr->qp_type);
433 return ERR_PTR(-EINVAL);
434 } else if (init_attr->qp_type == IB_QPT_RC && isdaqp &&
435 (init_attr->cap.max_send_wr > 255 ||
436 init_attr->cap.max_recv_wr > 255 )) {
437 ehca_err(pd->device, "Invalid Number of max_sq_wr =%x "
438 "or max_rq_wr=%x for QP Type=%x",
439 init_attr->cap.max_send_wr,
440 init_attr->cap.max_recv_wr,init_attr->qp_type);
441 return ERR_PTR(-EINVAL);
442 } else if (init_attr->qp_type == IB_QPT_UD && isdaqp &&
443 init_attr->cap.max_send_wr > 255) {
444 ehca_err(pd->device,
445 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
446 init_attr->cap.max_send_wr, init_attr->qp_type);
447 return ERR_PTR(-EINVAL);
448 }
449
450 if (pd->uobject && udata)
451 context = pd->uobject->context;
452
453 my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
454 if (!my_qp) {
455 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
456 return ERR_PTR(-ENOMEM);
457 }
458
459 memset(my_qp, 0, sizeof(struct ehca_qp));
460 memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
461 spin_lock_init(&my_qp->spinlock_s);
462 spin_lock_init(&my_qp->spinlock_r);
463
464 my_qp->recv_cq =
465 container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
466 my_qp->send_cq =
467 container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
468
469 my_qp->init_attr = *init_attr;
470
471 do {
472 if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
473 ret = -ENOMEM;
474 ehca_err(pd->device, "Can't reserve idr resources.");
475 goto create_qp_exit0;
476 }
477
478 spin_lock_irqsave(&ehca_qp_idr_lock, flags);
479 ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
480 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
481
482 } while (ret == -EAGAIN);
483
484 if (ret) {
485 ret = -ENOMEM;
486 ehca_err(pd->device, "Can't allocate new idr entry.");
487 goto create_qp_exit0;
488 }
489
490 parms.servicetype = ibqptype2servicetype(init_attr->qp_type);
491 if (parms.servicetype < 0) {
492 ret = -EINVAL;
493 ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type);
494 goto create_qp_exit0;
495 }
496
497 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
498 parms.sigtype = HCALL_SIGT_EVERY;
499 else
500 parms.sigtype = HCALL_SIGT_BY_WQE;
501
502 /* UD_AV CIRCUMVENTION */
503 max_send_sge = init_attr->cap.max_send_sge;
504 max_recv_sge = init_attr->cap.max_recv_sge;
505 if (IB_QPT_UD == init_attr->qp_type ||
506 IB_QPT_GSI == init_attr->qp_type ||
507 IB_QPT_SMI == init_attr->qp_type) {
508 max_send_sge += 2;
509 max_recv_sge += 2;
510 }
511
512 parms.ipz_eq_handle = shca->eq.ipz_eq_handle;
513 parms.daqp_ctrl = isdaqp | daqp_completion;
514 parms.pd = my_pd->fw_pd;
515 parms.max_recv_sge = max_recv_sge;
516 parms.max_send_sge = max_send_sge;
517
518 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms);
519
520 if (h_ret != H_SUCCESS) {
521 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
522 h_ret);
523 ret = ehca2ib_return_code(h_ret);
524 goto create_qp_exit1;
525 }
526
527 switch (init_attr->qp_type) {
528 case IB_QPT_RC:
529 if (isdaqp == 0) {
530 swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
531 (parms.act_nr_send_sges)]);
532 rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
533 (parms.act_nr_recv_sges)]);
534 } else { /* for daqp we need to use msg size, not wqe size */
535 swqe_size = da_rc_msg_size[max_send_sge];
536 rwqe_size = da_rc_msg_size[max_recv_sge];
537 parms.act_nr_send_sges = 1;
538 parms.act_nr_recv_sges = 1;
539 }
540 break;
541 case IB_QPT_UC:
542 swqe_size = offsetof(struct ehca_wqe,
543 u.nud.sg_list[parms.act_nr_send_sges]);
544 rwqe_size = offsetof(struct ehca_wqe,
545 u.nud.sg_list[parms.act_nr_recv_sges]);
546 break;
547
548 case IB_QPT_UD:
549 case IB_QPT_GSI:
550 case IB_QPT_SMI:
551 /* UD circumvention */
552 parms.act_nr_recv_sges -= 2;
553 parms.act_nr_send_sges -= 2;
554 if (isdaqp) {
555 swqe_size = da_ud_sq_msg_size[max_send_sge];
556 rwqe_size = da_rc_msg_size[max_recv_sge];
557 parms.act_nr_send_sges = 1;
558 parms.act_nr_recv_sges = 1;
559 } else {
560 swqe_size = offsetof(struct ehca_wqe,
561 u.ud_av.sg_list[parms.act_nr_send_sges]);
562 rwqe_size = offsetof(struct ehca_wqe,
563 u.ud_av.sg_list[parms.act_nr_recv_sges]);
564 }
565
566 if (IB_QPT_GSI == init_attr->qp_type ||
567 IB_QPT_SMI == init_attr->qp_type) {
568 parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
569 parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
570 parms.act_nr_send_sges = init_attr->cap.max_send_sge;
571 parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
572 my_qp->real_qp_num =
573 (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
574 }
575
576 break;
577
578 default:
579 break;
580 }
581
582 /* initializes r/squeue and registers queue pages */
583 ret = init_qp_queues(shca, my_qp,
584 parms.nr_sq_pages, parms.nr_rq_pages,
585 swqe_size, rwqe_size,
586 parms.act_nr_send_sges, parms.act_nr_recv_sges);
587 if (ret) {
588 ehca_err(pd->device,
589 "Couldn't initialize r/squeue and pages ret=%x", ret);
590 goto create_qp_exit2;
591 }
592
593 my_qp->ib_qp.pd = &my_pd->ib_pd;
594 my_qp->ib_qp.device = my_pd->ib_pd.device;
595
596 my_qp->ib_qp.recv_cq = init_attr->recv_cq;
597 my_qp->ib_qp.send_cq = init_attr->send_cq;
598
599 my_qp->ib_qp.qp_num = my_qp->real_qp_num;
600 my_qp->ib_qp.qp_type = init_attr->qp_type;
601
602 my_qp->qp_type = init_attr->qp_type;
603 my_qp->ib_qp.srq = init_attr->srq;
604
605 my_qp->ib_qp.qp_context = init_attr->qp_context;
606 my_qp->ib_qp.event_handler = init_attr->event_handler;
607
608 init_attr->cap.max_inline_data = 0; /* not supported yet */
609 init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
610 init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
611 init_attr->cap.max_send_sge = parms.act_nr_send_sges;
612 init_attr->cap.max_send_wr = parms.act_nr_send_wqes;
613
614 /* NOTE: define_apq0() not supported yet */
615 if (init_attr->qp_type == IB_QPT_GSI) {
616 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
617 if (h_ret != H_SUCCESS) {
618 ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
619 h_ret);
620 ret = ehca2ib_return_code(h_ret);
621 goto create_qp_exit3;
622 }
623 }
624 if (init_attr->send_cq) {
625 struct ehca_cq *cq = container_of(init_attr->send_cq,
626 struct ehca_cq, ib_cq);
627 ret = ehca_cq_assign_qp(cq, my_qp);
628 if (ret) {
629 ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
630 ret);
631 goto create_qp_exit3;
632 }
633 my_qp->send_cq = cq;
634 }
635 /* copy queues, galpa data to user space */
636 if (context && udata) {
637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
639 struct ehca_create_qp_resp resp;
640 struct vm_area_struct * vma;
641 memset(&resp, 0, sizeof(resp));
642
643 resp.qp_num = my_qp->real_qp_num;
644 resp.token = my_qp->token;
645 resp.qp_type = my_qp->qp_type;
646 resp.qkey = my_qp->qkey;
647 resp.real_qp_num = my_qp->real_qp_num;
648 /* rqueue properties */
649 resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size;
650 resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg;
651 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
652 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
653 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
654 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
655 ipz_rqueue->queue_length,
656 (void**)&resp.ipz_rqueue.queue,
657 &vma);
658 if (ret) {
659 ehca_err(pd->device, "Could not mmap rqueue pages");
660 goto create_qp_exit3;
661 }
662 my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
663 /* squeue properties */
664 resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
665 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
666 resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
667 resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
668 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
669 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
670 ipz_squeue->queue_length,
671 (void**)&resp.ipz_squeue.queue,
672 &vma);
673 if (ret) {
674 ehca_err(pd->device, "Could not mmap squeue pages");
675 goto create_qp_exit4;
676 }
677 my_qp->uspace_squeue = resp.ipz_squeue.queue;
678 /* fw_handle */
679 resp.galpas = my_qp->galpas;
680 ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
681 (void**)&resp.galpas.kernel.fw_handle,
682 &vma);
683 if (ret) {
684 ehca_err(pd->device, "Could not mmap fw_handle");
685 goto create_qp_exit5;
686 }
687 my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
688
689 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
690 ehca_err(pd->device, "Copy to udata failed");
691 ret = -EINVAL;
692 goto create_qp_exit6;
693 }
694 }
695
696 return &my_qp->ib_qp;
697
698create_qp_exit6:
699 ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
700
701create_qp_exit5:
702 ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
703
704create_qp_exit4:
705 ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
706
707create_qp_exit3:
708 ipz_queue_dtor(&my_qp->ipz_rqueue);
709 ipz_queue_dtor(&my_qp->ipz_squeue);
710
711create_qp_exit2:
712 hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
713
714create_qp_exit1:
715 spin_lock_irqsave(&ehca_qp_idr_lock, flags);
716 idr_remove(&ehca_qp_idr, my_qp->token);
717 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
718
719create_qp_exit0:
720 kmem_cache_free(qp_cache, my_qp);
721 return ERR_PTR(ret);
722}
723
724/*
725 * prepare_sqe_rts() is called by internal_modify_qp() at the sqe -> rts transition.
726 * It sets the purge bit of the bad wqe and all subsequent wqes to avoid reentering sqe
727 * and returns the total number of bad wqes in bad_wqe_cnt.
728 */
729static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
730 int *bad_wqe_cnt)
731{
732 u64 h_ret;
733 struct ipz_queue *squeue;
734 void *bad_send_wqe_p, *bad_send_wqe_v;
735 void *squeue_start_p, *squeue_end_p;
736 void *squeue_start_v, *squeue_end_v;
737 struct ehca_wqe *wqe;
738 int qp_num = my_qp->ib_qp.qp_num;
739
740 /* get send wqe pointer */
741 h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
742 my_qp->ipz_qp_handle, &my_qp->pf,
743 &bad_send_wqe_p, NULL, 2);
744 if (h_ret != H_SUCCESS) {
745 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
746 " ehca_qp=%p qp_num=%x h_ret=%lx",
747 my_qp, qp_num, h_ret);
748 return ehca2ib_return_code(h_ret);
749 }
750 bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63)));
751 ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
752 qp_num, bad_send_wqe_p);
753 /* convert wqe pointer to vadr */
754 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
755 if (ehca_debug_level)
756 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
757 squeue = &my_qp->ipz_squeue;
758 squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L));
759 squeue_end_p = squeue_start_p+squeue->queue_length;
760 squeue_start_v = abs_to_virt((u64)squeue_start_p);
761 squeue_end_v = abs_to_virt((u64)squeue_end_p);
762 ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
763 qp_num, squeue_start_v, squeue_end_v);
764
765 /* loop sets wqe's purge bit */
766 wqe = (struct ehca_wqe*)bad_send_wqe_v;
767 *bad_wqe_cnt = 0;
768 while (wqe->optype != 0xff && wqe->wqef != 0xff) {
769 if (ehca_debug_level)
770 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
771 wqe->nr_of_data_seg = 0; /* suppress data access */
772 wqe->wqef = WQEF_PURGE; /* WQE to be purged */
773 wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
774 *bad_wqe_cnt = (*bad_wqe_cnt)+1;
775 if ((void*)wqe >= squeue_end_v) {
776 wqe = squeue_start_v;
777 }
778 }
779 /*
780	 * the bad wqe will be reprocessed and ignored when poll_cq() is called,
781 * i.e. nr of wqes with flush error status is one less
782 */
783 ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
784 qp_num, (*bad_wqe_cnt)-1);
785 wqe->wqef = 0;
786
787 return 0;
788}
789
790/*
791 * internal_modify_qp with circumvention to handle aqp0 properly
792 * smi_reset2init indicates if this is an internal reset-to-init-call for
793 * smi. This flag must always be zero if called from ehca_modify_qp()!
794 * This internal function was introduced to avoid recursion of ehca_modify_qp()!
795 */
796static int internal_modify_qp(struct ib_qp *ibqp,
797 struct ib_qp_attr *attr,
798 int attr_mask, int smi_reset2init)
799{
800 enum ib_qp_state qp_cur_state, qp_new_state;
801 int cnt, qp_attr_idx, ret = 0;
802 enum ib_qp_statetrans statetrans;
803 struct hcp_modify_qp_control_block *mqpcb;
804 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
805 struct ehca_shca *shca =
806 container_of(ibqp->pd->device, struct ehca_shca, ib_device);
807 u64 update_mask;
808 u64 h_ret;
809 int bad_wqe_cnt = 0;
810 int squeue_locked = 0;
811 unsigned long spl_flags = 0;
812
813 /* do query_qp to obtain current attr values */
814 mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
815 if (mqpcb == NULL) {
816 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
817 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
818 return -ENOMEM;
819 }
820
821 h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
822 my_qp->ipz_qp_handle,
823 &my_qp->pf,
824 mqpcb, my_qp->galpas.kernel);
825 if (h_ret != H_SUCCESS) {
826 ehca_err(ibqp->device, "hipz_h_query_qp() failed "
827 "ehca_qp=%p qp_num=%x h_ret=%lx",
828 my_qp, ibqp->qp_num, h_ret);
829 ret = ehca2ib_return_code(h_ret);
830 goto modify_qp_exit1;
831 }
832
833 qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
834
835 if (qp_cur_state == -EINVAL) { /* invalid qp state */
836 ret = -EINVAL;
837 ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
838 "ehca_qp=%p qp_num=%x",
839 mqpcb->qp_state, my_qp, ibqp->qp_num);
840 goto modify_qp_exit1;
841 }
842 /*
843 * circumvention to set aqp0 initial state to init
844 * as expected by IB spec
845 */
846 if (smi_reset2init == 0 &&
847 ibqp->qp_type == IB_QPT_SMI &&
848 qp_cur_state == IB_QPS_RESET &&
849 (attr_mask & IB_QP_STATE) &&
850 attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
851 struct ib_qp_attr smiqp_attr = {
852 .qp_state = IB_QPS_INIT,
853 .port_num = my_qp->init_attr.port_num,
854 .pkey_index = 0,
855 .qkey = 0
856 };
857 int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
858 IB_QP_PKEY_INDEX | IB_QP_QKEY;
859 int smirc = internal_modify_qp(
860 ibqp, &smiqp_attr, smiqp_attr_mask, 1);
861 if (smirc) {
862 ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
863 "ehca_modify_qp() rc=%x", smirc);
864 ret = H_PARAMETER;
865 goto modify_qp_exit1;
866 }
867 qp_cur_state = IB_QPS_INIT;
868 ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
869 }
870	/* check whether the transmitted current state matches the actual current state */
871 if ((attr_mask & IB_QP_CUR_STATE) &&
872 qp_cur_state != attr->cur_qp_state) {
873 ret = -EINVAL;
874 ehca_err(ibqp->device,
875			 "Invalid IB_QP_CUR_STATE attr->cur_qp_state=%x <>"
876 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
877 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
878 goto modify_qp_exit1;
879 }
880
881 ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x "
882 "new qp_state=%x attribute_mask=%x",
883 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
884
885 qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
886 if (!smi_reset2init &&
887 !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
888 attr_mask)) {
889 ret = -EINVAL;
890 ehca_err(ibqp->device,
891 "Invalid qp transition new_state=%x cur_state=%x "
892 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
893 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
894 goto modify_qp_exit1;
895 }
896
897 if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state)))
898 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
899 else {
900 ret = -EINVAL;
901 ehca_err(ibqp->device, "Invalid new qp state=%x "
902 "ehca_qp=%p qp_num=%x",
903 qp_new_state, my_qp, ibqp->qp_num);
904 goto modify_qp_exit1;
905 }
906
907 /* retrieve state transition struct to get req and opt attrs */
908 statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
909 if (statetrans < 0) {
910 ret = -EINVAL;
911 ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
912 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
913 "qp_num=%x", qp_cur_state, qp_new_state,
914 statetrans, my_qp, ibqp->qp_num);
915 goto modify_qp_exit1;
916 }
917
918 qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
919
920 if (qp_attr_idx < 0) {
921 ret = qp_attr_idx;
922 ehca_err(ibqp->device,
923 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
924 ibqp->qp_type, my_qp, ibqp->qp_num);
925 goto modify_qp_exit1;
926 }
927
928 ehca_dbg(ibqp->device,
929 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
930 my_qp, ibqp->qp_num, statetrans);
931
932	/* sqe -> rts: set purge bit of bad wqes before the actual transition */
933 if ((my_qp->qp_type == IB_QPT_UD ||
934 my_qp->qp_type == IB_QPT_GSI ||
935 my_qp->qp_type == IB_QPT_SMI) &&
936 statetrans == IB_QPST_SQE2RTS) {
937 /* mark next free wqe if kernel */
938 if (my_qp->uspace_squeue == 0) {
939 struct ehca_wqe *wqe;
940 /* lock send queue */
941 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
942 squeue_locked = 1;
943 /* mark next free wqe */
944 wqe = (struct ehca_wqe*)
945 ipz_qeit_get(&my_qp->ipz_squeue);
946 wqe->optype = wqe->wqef = 0xff;
947 ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
948 ibqp->qp_num, wqe);
949 }
950 ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
951 if (ret) {
952 ehca_err(ibqp->device, "prepare_sqe_rts() failed "
953 "ehca_qp=%p qp_num=%x ret=%x",
954 my_qp, ibqp->qp_num, ret);
955 goto modify_qp_exit2;
956 }
957 }
958
959 /*
960	 * enable RDMA_Atomic_Control if reset->init and reliable connection;
961 * this is necessary since gen2 does not provide that flag,
962 * but pHyp requires it
963 */
964 if (statetrans == IB_QPST_RESET2INIT &&
965 (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
966 mqpcb->rdma_atomic_ctrl = 3;
967 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
968 }
969	/* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
970 if (statetrans == IB_QPST_INIT2RTR &&
971 (ibqp->qp_type == IB_QPT_UC) &&
972 !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
973 mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
974 update_mask |=
975 EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
976 }
977
978 if (attr_mask & IB_QP_PKEY_INDEX) {
979 mqpcb->prim_p_key_idx = attr->pkey_index;
980 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
981 }
982 if (attr_mask & IB_QP_PORT) {
983 if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
984 ret = -EINVAL;
985 ehca_err(ibqp->device, "Invalid port=%x. "
986 "ehca_qp=%p qp_num=%x num_ports=%x",
987 attr->port_num, my_qp, ibqp->qp_num,
988 shca->num_ports);
989 goto modify_qp_exit2;
990 }
991 mqpcb->prim_phys_port = attr->port_num;
992 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
993 }
994 if (attr_mask & IB_QP_QKEY) {
995 mqpcb->qkey = attr->qkey;
996 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
997 }
998 if (attr_mask & IB_QP_AV) {
999 int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
1000 int ehca_mult = ib_rate_to_mult(shca->sport[my_qp->
1001 init_attr.port_num].rate);
1002
1003 mqpcb->dlid = attr->ah_attr.dlid;
1004 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
1005 mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
1006 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
1007 mqpcb->service_level = attr->ah_attr.sl;
1008 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
1009
1010 if (ah_mult < ehca_mult)
1011 mqpcb->max_static_rate = (ah_mult > 0) ?
1012 ((ehca_mult - 1) / ah_mult) : 0;
1013 else
1014 mqpcb->max_static_rate = 0;
1015
1016 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
1017
1018 /*
1019		 * only if GRH is TRUE do we set SOURCE_GID_IDX and DEST_GID;
1020		 * otherwise pHyp will return H_ATTR_PARM!!!
1021 */
1022 if (attr->ah_attr.ah_flags == IB_AH_GRH) {
1023 mqpcb->send_grh_flag = 1 << 31;
1024 update_mask |=
1025 EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
1026 mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
1027 update_mask |=
1028 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
1029
1030 for (cnt = 0; cnt < 16; cnt++)
1031 mqpcb->dest_gid.byte[cnt] =
1032 attr->ah_attr.grh.dgid.raw[cnt];
1033
1034 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
1035 mqpcb->flow_label = attr->ah_attr.grh.flow_label;
1036 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
1037 mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
1038 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
1039 mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
1040 update_mask |=
1041 EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
1042 }
1043 }
1044
1045 if (attr_mask & IB_QP_PATH_MTU) {
1046 mqpcb->path_mtu = attr->path_mtu;
1047 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
1048 }
1049 if (attr_mask & IB_QP_TIMEOUT) {
1050 mqpcb->timeout = attr->timeout;
1051 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
1052 }
1053 if (attr_mask & IB_QP_RETRY_CNT) {
1054 mqpcb->retry_count = attr->retry_cnt;
1055 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
1056 }
1057 if (attr_mask & IB_QP_RNR_RETRY) {
1058 mqpcb->rnr_retry_count = attr->rnr_retry;
1059 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
1060 }
1061 if (attr_mask & IB_QP_RQ_PSN) {
1062 mqpcb->receive_psn = attr->rq_psn;
1063 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
1064 }
1065 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1066 mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
1067 attr->max_dest_rd_atomic : 2;
1068 update_mask |=
1069 EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
1070 }
1071 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1072 mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
1073 attr->max_rd_atomic : 2;
1074 update_mask |=
1075 EHCA_BMASK_SET
1076 (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
1077 }
1078 if (attr_mask & IB_QP_ALT_PATH) {
1079 int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate);
1080 int ehca_mult = ib_rate_to_mult(
1081 shca->sport[my_qp->init_attr.port_num].rate);
1082
1083 mqpcb->dlid_al = attr->alt_ah_attr.dlid;
1084 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1);
1085 mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
1086 update_mask |=
1087 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1);
1088 mqpcb->service_level_al = attr->alt_ah_attr.sl;
1089 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1);
1090
1091 if (ah_mult < ehca_mult)
1092			mqpcb->max_static_rate_al = (ah_mult > 0) ?
1093 ((ehca_mult - 1) / ah_mult) : 0;
1094 else
1095 mqpcb->max_static_rate_al = 0;
1096
1097 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1);
1098
1099 /*
1100		 * only if GRH is TRUE do we set SOURCE_GID_IDX and DEST_GID;
1101		 * otherwise pHyp will return H_ATTR_PARM!!!
1102 */
1103 if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
1104 mqpcb->send_grh_flag_al = 1 << 31;
1105 update_mask |=
1106 EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
1107 mqpcb->source_gid_idx_al =
1108 attr->alt_ah_attr.grh.sgid_index;
1109 update_mask |=
1110 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1);
1111
1112 for (cnt = 0; cnt < 16; cnt++)
1113 mqpcb->dest_gid_al.byte[cnt] =
1114 attr->alt_ah_attr.grh.dgid.raw[cnt];
1115
1116 update_mask |=
1117 EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1);
1118 mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
1119 update_mask |=
1120 EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1);
1121 mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
1122 update_mask |=
1123 EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1);
1124 mqpcb->traffic_class_al =
1125 attr->alt_ah_attr.grh.traffic_class;
1126 update_mask |=
1127 EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
1128 }
1129 }
1130
1131 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1132 mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
1133 update_mask |=
1134 EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
1135 }
1136
1137 if (attr_mask & IB_QP_SQ_PSN) {
1138 mqpcb->send_psn = attr->sq_psn;
1139 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
1140 }
1141
1142 if (attr_mask & IB_QP_DEST_QPN) {
1143 mqpcb->dest_qp_nr = attr->dest_qp_num;
1144 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
1145 }
1146
1147 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1148 mqpcb->path_migration_state = attr->path_mig_state;
1149 update_mask |=
1150 EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
1151 }
1152
1153 if (attr_mask & IB_QP_CAP) {
1154 mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
1155 update_mask |=
1156 EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
1157 mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
1158 update_mask |=
1159 EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
1160 /* no support for max_send/recv_sge yet */
1161 }
1162
1163 if (ehca_debug_level)
1164 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
1165
1166 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
1167 my_qp->ipz_qp_handle,
1168 &my_qp->pf,
1169 update_mask,
1170 mqpcb, my_qp->galpas.kernel);
1171
1172 if (h_ret != H_SUCCESS) {
1173 ret = ehca2ib_return_code(h_ret);
1174 ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx "
1175 "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num);
1176 goto modify_qp_exit2;
1177 }
1178
1179 if ((my_qp->qp_type == IB_QPT_UD ||
1180 my_qp->qp_type == IB_QPT_GSI ||
1181 my_qp->qp_type == IB_QPT_SMI) &&
1182 statetrans == IB_QPST_SQE2RTS) {
1183 /* doorbell to reprocessing wqes */
1184 iosync(); /* serialize GAL register access */
1185 hipz_update_sqa(my_qp, bad_wqe_cnt-1);
1186 ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
1187 }
1188
1189 if (statetrans == IB_QPST_RESET2INIT ||
1190 statetrans == IB_QPST_INIT2INIT) {
1191 mqpcb->qp_enable = 1;
1192 mqpcb->qp_state = EHCA_QPS_INIT;
1193 update_mask = 0;
1194 update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
1195
1196 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
1197 my_qp->ipz_qp_handle,
1198 &my_qp->pf,
1199 update_mask,
1200 mqpcb,
1201 my_qp->galpas.kernel);
1202
1203 if (h_ret != H_SUCCESS) {
1204 ret = ehca2ib_return_code(h_ret);
1205 ehca_err(ibqp->device, "ENABLE in context of "
1206 "RESET_2_INIT failed! Maybe you didn't get "
1207 "a LID h_ret=%lx ehca_qp=%p qp_num=%x",
1208 h_ret, my_qp, ibqp->qp_num);
1209 goto modify_qp_exit2;
1210 }
1211 }
1212
1213 if (statetrans == IB_QPST_ANY2RESET) {
1214 ipz_qeit_reset(&my_qp->ipz_rqueue);
1215 ipz_qeit_reset(&my_qp->ipz_squeue);
1216 }
1217
1218 if (attr_mask & IB_QP_QKEY)
1219 my_qp->qkey = attr->qkey;
1220
1221modify_qp_exit2:
1222 if (squeue_locked) { /* this means: sqe -> rts */
1223 spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
1224 my_qp->sqerr_purgeflag = 1;
1225 }
1226
1227modify_qp_exit1:
1228 kfree(mqpcb);
1229
1230 return ret;
1231}
1232
1233int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1234 struct ib_udata *udata)
1235{
1236 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1237 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1238 ib_pd);
1239 u32 cur_pid = current->tgid;
1240
1241 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1242 my_pd->ownpid != cur_pid) {
1243 ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
1244 cur_pid, my_pd->ownpid);
1245 return -EINVAL;
1246 }
1247
1248 return internal_modify_qp(ibqp, attr, attr_mask, 0);
1249}
1250
1251int ehca_query_qp(struct ib_qp *qp,
1252 struct ib_qp_attr *qp_attr,
1253 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1254{
1255 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
1256 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1257 ib_pd);
1258 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
1259 ib_device);
1260 struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
1261 struct hcp_modify_qp_control_block *qpcb;
1262 u32 cur_pid = current->tgid;
1263 int cnt, ret = 0;
1264 u64 h_ret;
1265
1266 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1267 my_pd->ownpid != cur_pid) {
1268 ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
1269 cur_pid, my_pd->ownpid);
1270 return -EINVAL;
1271 }
1272
1273 if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
1274 ehca_err(qp->device,"Invalid attribute mask "
1275 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
1276 my_qp, qp->qp_num, qp_attr_mask);
1277 return -EINVAL;
1278 }
1279
1280 qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL );
1281 if (!qpcb) {
1282 ehca_err(qp->device,"Out of memory for qpcb "
1283 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
1284 return -ENOMEM;
1285 }
1286
1287 h_ret = hipz_h_query_qp(adapter_handle,
1288 my_qp->ipz_qp_handle,
1289 &my_qp->pf,
1290 qpcb, my_qp->galpas.kernel);
1291
1292 if (h_ret != H_SUCCESS) {
1293 ret = ehca2ib_return_code(h_ret);
1294 ehca_err(qp->device,"hipz_h_query_qp() failed "
1295 "ehca_qp=%p qp_num=%x h_ret=%lx",
1296 my_qp, qp->qp_num, h_ret);
1297 goto query_qp_exit1;
1298 }
1299
1300 qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
1301 qp_attr->qp_state = qp_attr->cur_qp_state;
1302
1303 if (qp_attr->cur_qp_state == -EINVAL) {
1304 ret = -EINVAL;
1305 ehca_err(qp->device,"Got invalid ehca_qp_state=%x "
1306 "ehca_qp=%p qp_num=%x",
1307 qpcb->qp_state, my_qp, qp->qp_num);
1308 goto query_qp_exit1;
1309 }
1310
1311 if (qp_attr->qp_state == IB_QPS_SQD)
1312 qp_attr->sq_draining = 1;
1313
1314 qp_attr->qkey = qpcb->qkey;
1315 qp_attr->path_mtu = qpcb->path_mtu;
1316 qp_attr->path_mig_state = qpcb->path_migration_state;
1317 qp_attr->rq_psn = qpcb->receive_psn;
1318 qp_attr->sq_psn = qpcb->send_psn;
1319 qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
1320 qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
1321 qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
1322 /* UD_AV CIRCUMVENTION */
1323 if (my_qp->qp_type == IB_QPT_UD) {
1324 qp_attr->cap.max_send_sge =
1325 qpcb->actual_nr_sges_in_sq_wqe - 2;
1326 qp_attr->cap.max_recv_sge =
1327 qpcb->actual_nr_sges_in_rq_wqe - 2;
1328 } else {
1329 qp_attr->cap.max_send_sge =
1330 qpcb->actual_nr_sges_in_sq_wqe;
1331 qp_attr->cap.max_recv_sge =
1332 qpcb->actual_nr_sges_in_rq_wqe;
1333 }
1334
1335 qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
1336 qp_attr->dest_qp_num = qpcb->dest_qp_nr;
1337
1338 qp_attr->pkey_index =
1339 EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
1340
1341 qp_attr->port_num =
1342 EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
1343
1344 qp_attr->timeout = qpcb->timeout;
1345 qp_attr->retry_cnt = qpcb->retry_count;
1346 qp_attr->rnr_retry = qpcb->rnr_retry_count;
1347
1348 qp_attr->alt_pkey_index =
1349 EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
1350
1351 qp_attr->alt_port_num = qpcb->alt_phys_port;
1352 qp_attr->alt_timeout = qpcb->timeout_al;
1353
1354 /* primary av */
1355 qp_attr->ah_attr.sl = qpcb->service_level;
1356
1357 if (qpcb->send_grh_flag) {
1358 qp_attr->ah_attr.ah_flags = IB_AH_GRH;
1359 }
1360
1361 qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
1362 qp_attr->ah_attr.dlid = qpcb->dlid;
1363 qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
1364 qp_attr->ah_attr.port_num = qp_attr->port_num;
1365
1366 /* primary GRH */
1367 qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
1368 qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
1369 qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
1370 qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
1371
1372 for (cnt = 0; cnt < 16; cnt++)
1373 qp_attr->ah_attr.grh.dgid.raw[cnt] =
1374 qpcb->dest_gid.byte[cnt];
1375
1376 /* alternate AV */
1377 qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
1378 if (qpcb->send_grh_flag_al) {
1379 qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
1380 }
1381
1382 qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
1383 qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
1384 qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
1385
1386 /* alternate GRH */
1387 qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
1388 qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
1389 qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
1390 qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
1391
1392 for (cnt = 0; cnt < 16; cnt++)
1393 qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
1394 qpcb->dest_gid_al.byte[cnt];
1395
1396 /* return init attributes given in ehca_create_qp */
1397 if (qp_init_attr)
1398 *qp_init_attr = my_qp->init_attr;
1399
1400 if (ehca_debug_level)
1401 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
1402
1403query_qp_exit1:
1404 kfree(qpcb);
1405
1406 return ret;
1407}
1408
1409int ehca_destroy_qp(struct ib_qp *ibqp)
1410{
1411 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
1412 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
1413 ib_device);
1414 struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
1415 ib_pd);
1416 u32 cur_pid = current->tgid;
1417 u32 qp_num = ibqp->qp_num;
1418 int ret;
1419 u64 h_ret;
1420 u8 port_num;
1421 enum ib_qp_type qp_type;
1422 unsigned long flags;
1423
1424 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
1425 my_pd->ownpid != cur_pid) {
1426 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
1427 cur_pid, my_pd->ownpid);
1428 return -EINVAL;
1429 }
1430
1431 if (my_qp->send_cq) {
1432 ret = ehca_cq_unassign_qp(my_qp->send_cq,
1433 my_qp->real_qp_num);
1434 if (ret) {
1435 ehca_err(ibqp->device, "Couldn't unassign qp from "
1436 "send_cq ret=%x qp_num=%x cq_num=%x", ret,
1437 my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
1438 return ret;
1439 }
1440 }
1441
1442 spin_lock_irqsave(&ehca_qp_idr_lock, flags);
1443 idr_remove(&ehca_qp_idr, my_qp->token);
1444 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1445
1446 /* un-mmap if vma alloc */
1447 if (my_qp->uspace_rqueue) {
1448 ret = ehca_munmap(my_qp->uspace_rqueue,
1449 my_qp->ipz_rqueue.queue_length);
1450 if (ret)
1451 ehca_err(ibqp->device, "Could not munmap rqueue "
1452 "qp_num=%x", qp_num);
1453 ret = ehca_munmap(my_qp->uspace_squeue,
1454 my_qp->ipz_squeue.queue_length);
1455 if (ret)
1456 ehca_err(ibqp->device, "Could not munmap squeue "
1457 "qp_num=%x", qp_num);
1458 ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
1459 if (ret)
1460 ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
1461 qp_num);
1462 }
1463
1464 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
1465 if (h_ret != H_SUCCESS) {
1466 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
1467 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
1468 return ehca2ib_return_code(h_ret);
1469 }
1470
1471 port_num = my_qp->init_attr.port_num;
1472 qp_type = my_qp->init_attr.qp_type;
1473
1474 /* no support for IB_QPT_SMI yet */
1475 if (qp_type == IB_QPT_GSI) {
1476 struct ib_event event;
1477 ehca_info(ibqp->device, "device %s: port %x is inactive.",
1478 shca->ib_device.name, port_num);
1479 event.device = &shca->ib_device;
1480 event.event = IB_EVENT_PORT_ERR;
1481 event.element.port_num = port_num;
1482 shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
1483 ib_dispatch_event(&event);
1484 }
1485
1486 ipz_queue_dtor(&my_qp->ipz_rqueue);
1487 ipz_queue_dtor(&my_qp->ipz_squeue);
1488 kmem_cache_free(qp_cache, my_qp);
1489 return 0;
1490}
1491
1492int ehca_init_qp_cache(void)
1493{
1494 qp_cache = kmem_cache_create("ehca_cache_qp",
1495 sizeof(struct ehca_qp), 0,
1496 SLAB_HWCACHE_ALIGN,
1497 NULL, NULL);
1498 if (!qp_cache)
1499 return -ENOMEM;
1500 return 0;
1501}
1502
1503void ehca_cleanup_qp_cache(void)
1504{
1505 if (qp_cache)
1506 kmem_cache_destroy(qp_cache);
1507}
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
new file mode 100644
index 000000000000..b46bda1bf85d
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -0,0 +1,653 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * post_send/recv, poll_cq, req_notify
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#include <asm-powerpc/system.h>
45#include "ehca_classes.h"
46#include "ehca_tools.h"
47#include "ehca_qes.h"
48#include "ehca_iverbs.h"
49#include "hcp_if.h"
50#include "hipz_fns.h"
51
52static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
53 struct ehca_wqe *wqe_p,
54 struct ib_recv_wr *recv_wr)
55{
56 u8 cnt_ds;
57 if (unlikely((recv_wr->num_sge < 0) ||
58 (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
59 ehca_gen_err("Invalid number of WQE SGE. "
60			     "num_sge=%x max_nr_of_sg=%x",
61 recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
62 return -EINVAL; /* invalid SG list length */
63 }
64
65 /* clear wqe header until sglist */
66 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
67
68 wqe_p->work_request_id = recv_wr->wr_id;
69 wqe_p->nr_of_data_seg = recv_wr->num_sge;
70
71 for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
72 wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
73 recv_wr->sg_list[cnt_ds].addr;
74 wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
75 recv_wr->sg_list[cnt_ds].lkey;
76 wqe_p->u.all_rcv.sg_list[cnt_ds].length =
77 recv_wr->sg_list[cnt_ds].length;
78 }
79
80 if (ehca_debug_level) {
81 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
82 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
83 }
84
85 return 0;
86}
87
88#if defined(DEBUG_GSI_SEND_WR)
89
90/* need ib_mad struct */
91#include <rdma/ib_mad.h>
92
93static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
94{
95	int idx = 0;
96 int j;
97 while (send_wr) {
98 struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
99 struct ib_sge *sge = send_wr->sg_list;
100 ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
101 "send_flags=%x opcode=%x",idx, send_wr->wr_id,
102 send_wr->num_sge, send_wr->send_flags,
103 send_wr->opcode);
104 if (mad_hdr) {
105 ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
106 "mgmt_class=%x class_version=%x method=%x "
107 "status=%x class_specific=%x tid=%lx "
108 "attr_id=%x resv=%x attr_mod=%x",
109 idx, mad_hdr->base_version,
110 mad_hdr->mgmt_class,
111 mad_hdr->class_version, mad_hdr->method,
112 mad_hdr->status, mad_hdr->class_specific,
113 mad_hdr->tid, mad_hdr->attr_id,
114 mad_hdr->resv,
115 mad_hdr->attr_mod);
116 }
117 for (j = 0; j < send_wr->num_sge; j++) {
118 u8 *data = (u8 *) abs_to_virt(sge->addr);
119 ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
120 "lkey=%x",
121 idx, j, data, sge->length, sge->lkey);
122 /* assume length is n*16 */
123 ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
124 idx, j);
125 sge++;
126 } /* eof for j */
127 idx++;
128 send_wr = send_wr->next;
129 } /* eof while send_wr */
130}
131
132#endif /* DEBUG_GSI_SEND_WR */
133
134static inline int ehca_write_swqe(struct ehca_qp *qp,
135 struct ehca_wqe *wqe_p,
136 const struct ib_send_wr *send_wr)
137{
138 u32 idx;
139 u64 dma_length;
140 struct ehca_av *my_av;
141 u32 remote_qkey = send_wr->wr.ud.remote_qkey;
142
143 if (unlikely((send_wr->num_sge < 0) ||
144 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
145 ehca_gen_err("Invalid number of WQE SGE. "
146			     "num_sge=%x max_nr_of_sg=%x",
147 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
148 return -EINVAL; /* invalid SG list length */
149 }
150
151 /* clear wqe header until sglist */
152 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
153
154 wqe_p->work_request_id = send_wr->wr_id;
155
156 switch (send_wr->opcode) {
157 case IB_WR_SEND:
158 case IB_WR_SEND_WITH_IMM:
159 wqe_p->optype = WQE_OPTYPE_SEND;
160 break;
161 case IB_WR_RDMA_WRITE:
162 case IB_WR_RDMA_WRITE_WITH_IMM:
163 wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
164 break;
165 case IB_WR_RDMA_READ:
166 wqe_p->optype = WQE_OPTYPE_RDMAREAD;
167 break;
168 default:
169 ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
170 return -EINVAL; /* invalid opcode */
171 }
172
173 wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
174
175 wqe_p->wr_flag = 0;
176
177 if (send_wr->send_flags & IB_SEND_SIGNALED)
178 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
179
180 if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
181 send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
182 /* this might not work as long as HW does not support it */
183 wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
184 wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
185 }
186
187 wqe_p->nr_of_data_seg = send_wr->num_sge;
188
189 switch (qp->qp_type) {
190 case IB_QPT_SMI:
191 case IB_QPT_GSI:
192		/* no break is intentional here */
193 case IB_QPT_UD:
194 /* IB 1.2 spec C10-15 compliance */
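		/*
		 * editor's note: a set high-order bit in the WR's remote
		 * Q_Key means "use the Q_Key from the QP context instead",
		 * which is what the check below implements
		 */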
195 if (send_wr->wr.ud.remote_qkey & 0x80000000)
196 remote_qkey = qp->qkey;
197
198 wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
199 wqe_p->local_ee_context_qkey = remote_qkey;
200 if (!send_wr->wr.ud.ah) {
201 ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
202 return -EINVAL;
203 }
204 my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
205 wqe_p->u.ud_av.ud_av = my_av->av;
206
207 /*
208 * omitted check of IB_SEND_INLINE
209 * since HW does not support it
210 */
211 for (idx = 0; idx < send_wr->num_sge; idx++) {
212 wqe_p->u.ud_av.sg_list[idx].vaddr =
213 send_wr->sg_list[idx].addr;
214 wqe_p->u.ud_av.sg_list[idx].lkey =
215 send_wr->sg_list[idx].lkey;
216 wqe_p->u.ud_av.sg_list[idx].length =
217 send_wr->sg_list[idx].length;
218 } /* eof for idx */
219 if (qp->qp_type == IB_QPT_SMI ||
220 qp->qp_type == IB_QPT_GSI)
221 wqe_p->u.ud_av.ud_av.pmtu = 1;
222 if (qp->qp_type == IB_QPT_GSI) {
223 wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
224#ifdef DEBUG_GSI_SEND_WR
225 trace_send_wr_ud(send_wr);
226#endif /* DEBUG_GSI_SEND_WR */
227 }
228 break;
229
230 case IB_QPT_UC:
231 if (send_wr->send_flags & IB_SEND_FENCE)
232 wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
233 /* no break is intentional here */
234 case IB_QPT_RC:
235 /* TODO: atomic not implemented */
236 wqe_p->u.nud.remote_virtual_adress =
237 send_wr->wr.rdma.remote_addr;
238 wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;
239
240 /*
241 * omitted checking of IB_SEND_INLINE
242 * since HW does not support it
243 */
244 dma_length = 0;
245 for (idx = 0; idx < send_wr->num_sge; idx++) {
246 wqe_p->u.nud.sg_list[idx].vaddr =
247 send_wr->sg_list[idx].addr;
248 wqe_p->u.nud.sg_list[idx].lkey =
249 send_wr->sg_list[idx].lkey;
250 wqe_p->u.nud.sg_list[idx].length =
251 send_wr->sg_list[idx].length;
252 dma_length += send_wr->sg_list[idx].length;
253 } /* eof idx */
254 wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
255
256 break;
257
258 default:
259 ehca_gen_err("Invalid qptype=%x", qp->qp_type);
260 return -EINVAL;
261 }
262
263 if (ehca_debug_level) {
264 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
265 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
266 }
267 return 0;
268}
269
270/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
271static inline void map_ib_wc_status(u32 cqe_status,
272 enum ib_wc_status *wc_status)
273{
274 if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
275 switch (cqe_status & 0x3F) {
276 case 0x01:
277 case 0x21:
278 *wc_status = IB_WC_LOC_LEN_ERR;
279 break;
280 case 0x02:
281 case 0x22:
282 *wc_status = IB_WC_LOC_QP_OP_ERR;
283 break;
284 case 0x03:
285 case 0x23:
286 *wc_status = IB_WC_LOC_EEC_OP_ERR;
287 break;
288 case 0x04:
289 case 0x24:
290 *wc_status = IB_WC_LOC_PROT_ERR;
291 break;
292 case 0x05:
293 case 0x25:
294 *wc_status = IB_WC_WR_FLUSH_ERR;
295 break;
296 case 0x06:
297 *wc_status = IB_WC_MW_BIND_ERR;
298 break;
299 case 0x07: /* remote error - look into bits 20:24 */
300 switch ((cqe_status
301 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
302 case 0x0:
303 /*
304 * PSN Sequence Error!
305 * couldn't find a matching status!
306 */
307 *wc_status = IB_WC_GENERAL_ERR;
308 break;
309 case 0x1:
310 *wc_status = IB_WC_REM_INV_REQ_ERR;
311 break;
312 case 0x2:
313 *wc_status = IB_WC_REM_ACCESS_ERR;
314 break;
315 case 0x3:
316 *wc_status = IB_WC_REM_OP_ERR;
317 break;
318 case 0x4:
319 *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
320 break;
321 }
322 break;
323 case 0x08:
324 *wc_status = IB_WC_RETRY_EXC_ERR;
325 break;
326 case 0x09:
327 *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
328 break;
329 case 0x0A:
330 case 0x2D:
331 *wc_status = IB_WC_REM_ABORT_ERR;
332 break;
333 case 0x0B:
334 case 0x2E:
335 *wc_status = IB_WC_INV_EECN_ERR;
336 break;
337 case 0x0C:
338 case 0x2F:
339 *wc_status = IB_WC_INV_EEC_STATE_ERR;
340 break;
341 case 0x0D:
342 *wc_status = IB_WC_BAD_RESP_ERR;
343 break;
344 case 0x10:
345 /* WQE purged */
346 *wc_status = IB_WC_WR_FLUSH_ERR;
347 break;
348 default:
349 *wc_status = IB_WC_FATAL_ERR;
350
351 }
352 } else
353 *wc_status = IB_WC_SUCCESS;
354}
355
356int ehca_post_send(struct ib_qp *qp,
357 struct ib_send_wr *send_wr,
358 struct ib_send_wr **bad_send_wr)
359{
360 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
361 struct ib_send_wr *cur_send_wr;
362 struct ehca_wqe *wqe_p;
363 int wqe_cnt = 0;
364 int ret = 0;
365 unsigned long spl_flags;
366
367 /* LOCK the QUEUE */
368 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
369
370 /* loop processes list of send reqs */
371 for (cur_send_wr = send_wr; cur_send_wr != NULL;
372 cur_send_wr = cur_send_wr->next) {
373 u64 start_offset = my_qp->ipz_squeue.current_q_offset;
374 /* get pointer next to free WQE */
375 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
376 if (unlikely(!wqe_p)) {
377 /* too many posted work requests: queue overflow */
378 if (bad_send_wr)
379 *bad_send_wr = cur_send_wr;
380 if (wqe_cnt == 0) {
381 ret = -ENOMEM;
382 ehca_err(qp->device, "Too many posted WQEs "
383 "qp_num=%x", qp->qp_num);
384 }
385 goto post_send_exit0;
386 }
387 /* write a SEND WQE into the QUEUE */
388 ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
389 /*
390 * if something failed,
391 * reset the free entry pointer to the start value
392 */
393 if (unlikely(ret)) {
394 my_qp->ipz_squeue.current_q_offset = start_offset;
395 *bad_send_wr = cur_send_wr;
396 if (wqe_cnt == 0) {
397 ret = -EINVAL;
398 ehca_err(qp->device, "Could not write WQE "
399 "qp_num=%x", qp->qp_num);
400 }
401 goto post_send_exit0;
402 }
403 wqe_cnt++;
404 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
405 my_qp, qp->qp_num, wqe_cnt);
406 } /* eof for cur_send_wr */
407
408post_send_exit0:
409 /* UNLOCK the QUEUE */
410 spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
411 iosync(); /* serialize GAL register access */
412 hipz_update_sqa(my_qp, wqe_cnt);
413 return ret;
414}
415
416int ehca_post_recv(struct ib_qp *qp,
417 struct ib_recv_wr *recv_wr,
418 struct ib_recv_wr **bad_recv_wr)
419{
420 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
421 struct ib_recv_wr *cur_recv_wr;
422 struct ehca_wqe *wqe_p;
423 int wqe_cnt = 0;
424 int ret = 0;
425 unsigned long spl_flags;
426
427 /* LOCK the QUEUE */
428 spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
429
430	/* loop processes list of recv reqs */
431 for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
432 cur_recv_wr = cur_recv_wr->next) {
433 u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
434 /* get pointer next to free WQE */
435 wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
436 if (unlikely(!wqe_p)) {
437 /* too many posted work requests: queue overflow */
438 if (bad_recv_wr)
439 *bad_recv_wr = cur_recv_wr;
440 if (wqe_cnt == 0) {
441 ret = -ENOMEM;
442 ehca_err(qp->device, "Too many posted WQEs "
443 "qp_num=%x", qp->qp_num);
444 }
445 goto post_recv_exit0;
446 }
447 /* write a RECV WQE into the QUEUE */
448 ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
449 /*
450 * if something failed,
451 * reset the free entry pointer to the start value
452 */
453 if (unlikely(ret)) {
454 my_qp->ipz_rqueue.current_q_offset = start_offset;
455 *bad_recv_wr = cur_recv_wr;
456 if (wqe_cnt == 0) {
457 ret = -EINVAL;
458 ehca_err(qp->device, "Could not write WQE "
459 "qp_num=%x", qp->qp_num);
460 }
461 goto post_recv_exit0;
462 }
463 wqe_cnt++;
464 ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
465 my_qp, qp->qp_num, wqe_cnt);
466 } /* eof for cur_recv_wr */
467
468post_recv_exit0:
469 spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
470 iosync(); /* serialize GAL register access */
471 hipz_update_rqa(my_qp, wqe_cnt);
472 return ret;
473}
474
475/*
476 * ib_wc_opcode table converts ehca wc opcode to ib
477 * Since we use zero to indicate invalid opcode, the actual ib opcode must
478 * be decremented!!!
479 */
480static const u8 ib_wc_opcode[255] = {
481 [0x01] = IB_WC_RECV+1,
482 [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
483 [0x04] = IB_WC_BIND_MW+1,
484 [0x08] = IB_WC_FETCH_ADD+1,
485 [0x10] = IB_WC_COMP_SWAP+1,
486 [0x20] = IB_WC_RDMA_WRITE+1,
487 [0x40] = IB_WC_RDMA_READ+1,
488 [0x80] = IB_WC_SEND+1
489};
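/*
 * Editor's illustration (not part of the original source): unknown optypes
 * index a zero entry above, so after the "- 1" in ehca_poll_cq_one() they
 * show up as -1, while valid entries yield the real enum value, e.g.
 *
 *	ib_wc_opcode[0x80] - 1 == IB_WC_SEND
 *	ib_wc_opcode[0x42] - 1 == -1	(0x42 is not a known ehca optype)
 */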
490
491/* internal function to poll one entry of cq */
492static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
493{
494 int ret = 0;
495 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
496 struct ehca_cqe *cqe;
497 int cqe_count = 0;
498
499poll_cq_one_read_cqe:
500 cqe = (struct ehca_cqe *)
501 ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
502 if (!cqe) {
503 ret = -EAGAIN;
504 ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
505 "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret);
506 goto poll_cq_one_exit0;
507 }
508
509 /* prevents loads being reordered across this point */
510 rmb();
511
512 cqe_count++;
513 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
514 struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
515 int purgeflag;
516 unsigned long spl_flags;
517 if (!qp) {
518 ehca_err(cq->device, "cq_num=%x qp_num=%x "
519 "could not find qp -> ignore cqe",
520 my_cq->cq_number, cqe->local_qp_number);
521 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
522 my_cq->cq_number, cqe->local_qp_number);
523 /* ignore this purged cqe */
524 goto poll_cq_one_read_cqe;
525 }
526 spin_lock_irqsave(&qp->spinlock_s, spl_flags);
527 purgeflag = qp->sqerr_purgeflag;
528 spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
529
530 if (purgeflag) {
531 ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
532 "src_qp=%x",
533 cqe->local_qp_number, cqe->remote_qp_number);
534 if (ehca_debug_level)
535 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
536 cqe->local_qp_number,
537 cqe->remote_qp_number);
538 /*
539 * ignore this to avoid double cqes of bad wqe
540 * that caused sqe and turn off purge flag
541 */
542 qp->sqerr_purgeflag = 0;
543 goto poll_cq_one_read_cqe;
544 }
545 }
546
547 /* tracing cqe */
548 if (ehca_debug_level) {
549 ehca_dbg(cq->device,
550 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
551 my_cq, my_cq->cq_number);
552 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
553 my_cq, my_cq->cq_number);
554 ehca_dbg(cq->device,
555 "ehca_cq=%p cq_num=%x -------------------------",
556 my_cq, my_cq->cq_number);
557 }
558
559 /* we got a completion! */
560 wc->wr_id = cqe->work_request_id;
561
562 /* eval ib_wc_opcode */
563 wc->opcode = ib_wc_opcode[cqe->optype]-1;
564 if (unlikely(wc->opcode == -1)) {
565 ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
566 "ehca_cq=%p cq_num=%x",
567 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
568 /* dump cqe for other infos */
569 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
570 my_cq, my_cq->cq_number);
571		/* still update the queue adder to throw away this entry!!! */
572 goto poll_cq_one_exit0;
573 }
574 /* eval ib_wc_status */
575 if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
576 /* complete with errors */
577 map_ib_wc_status(cqe->status, &wc->status);
578 wc->vendor_err = wc->status;
579 } else
580 wc->status = IB_WC_SUCCESS;
581
582 wc->qp_num = cqe->local_qp_number;
583 wc->byte_len = cqe->nr_bytes_transferred;
584 wc->pkey_index = cqe->pkey_index;
585 wc->slid = cqe->rlid;
586 wc->dlid_path_bits = cqe->dlid;
587 wc->src_qp = cqe->remote_qp_number;
588 wc->wc_flags = cqe->w_completion_flags;
589 wc->imm_data = cpu_to_be32(cqe->immediate_data);
590 wc->sl = cqe->service_level;
591
592 if (wc->status != IB_WC_SUCCESS)
593 ehca_dbg(cq->device,
594 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
595 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
596 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
597 cqe->status, cqe->local_qp_number,
598 cqe->remote_qp_number, cqe->work_request_id, cqe);
599
600poll_cq_one_exit0:
601 if (cqe_count > 0)
602 hipz_update_feca(my_cq, cqe_count);
603
604 return ret;
605}
606
607int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
608{
609 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
610 int nr;
611 struct ib_wc *current_wc = wc;
612 int ret = 0;
613 unsigned long spl_flags;
614
615 if (num_entries < 1) {
616 ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
617 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
618 ret = -EINVAL;
619 goto poll_cq_exit0;
620 }
621
622 spin_lock_irqsave(&my_cq->spinlock, spl_flags);
623 for (nr = 0; nr < num_entries; nr++) {
624 ret = ehca_poll_cq_one(cq, current_wc);
625 if (ret)
626 break;
627 current_wc++;
628 } /* eof for nr */
629 spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
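	/* an empty CQ (-EAGAIN) is not an error; report how many entries were polled */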
630 if (ret == -EAGAIN || !ret)
631 ret = nr;
632
633poll_cq_exit0:
634 return ret;
635}
636
637int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
638{
639 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
640
641 switch (cq_notify) {
642 case IB_CQ_SOLICITED:
643 hipz_set_cqx_n0(my_cq, 1);
644 break;
645 case IB_CQ_NEXT_COMP:
646 hipz_set_cqx_n1(my_cq, 1);
647 break;
648 default:
649 return -EINVAL;
650 }
651
652 return 0;
653}
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
new file mode 100644
index 000000000000..9f16e9c79394
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -0,0 +1,111 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * SQP functions
5 *
6 * Authors: Khadija Souissi <souissi@de.ibm.com>
7 * Heiko J Schick <schickhj@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42
43#include <linux/module.h>
44#include <linux/err.h>
45#include "ehca_classes.h"
46#include "ehca_tools.h"
47#include "ehca_qes.h"
48#include "ehca_iverbs.h"
49#include "hcp_if.h"
50
51
52/**
53 * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When the special
54 * queue pair is created successfully, the corresponding port becomes active.
55 *
56 * Defining special queue pair 0 (SMI QP) is not yet supported.
57 *
58 * @qp_init_attr: Queue pair init attributes with port and queue pair type
59 */
60
61u64 ehca_define_sqp(struct ehca_shca *shca,
62 struct ehca_qp *ehca_qp,
63 struct ib_qp_init_attr *qp_init_attr)
64{
65 u32 pma_qp_nr, bma_qp_nr;
66 u64 ret;
67 u8 port = qp_init_attr->port_num;
68 int counter;
69
70 shca->sport[port - 1].port_state = IB_PORT_DOWN;
71
72 switch (qp_init_attr->qp_type) {
73 case IB_QPT_SMI:
74 /* function not supported yet */
75 break;
76 case IB_QPT_GSI:
77 ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
78 ehca_qp->ipz_qp_handle,
79 ehca_qp->galpas.kernel,
80 (u32) qp_init_attr->port_num,
81 &pma_qp_nr, &bma_qp_nr);
82
83 if (ret != H_SUCCESS) {
84 ehca_err(&shca->ib_device,
85 "Can't define AQP1 for port %x. rc=%lx",
86 port, ret);
87 return ret;
88 }
89 break;
90 default:
91 ehca_err(&shca->ib_device, "invalid qp_type=%x",
92 qp_init_attr->qp_type);
93 return H_PARAMETER;
94 }
95
96 for (counter = 0;
97 shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
98 counter < ehca_port_act_time;
99 counter++) {
100 ehca_dbg(&shca->ib_device, "... wait until port %x is active",
101 port);
102 msleep_interruptible(1000);
103 }
104
105 if (counter == ehca_port_act_time) {
106 ehca_err(&shca->ib_device, "Port %x is not active.", port);
107 return H_HARDWARE;
108 }
109
110 return H_SUCCESS;
111}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
new file mode 100644
index 000000000000..9f56bb846d93
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -0,0 +1,172 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * auxiliary functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Khadija Souissi <souissik@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 * Heiko J Schick <schickhj@de.ibm.com>
11 *
12 * Copyright (c) 2005 IBM Corporation
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43
44#ifndef EHCA_TOOLS_H
45#define EHCA_TOOLS_H
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/delay.h>
50#include <linux/idr.h>
51#include <linux/kthread.h>
52#include <linux/mm.h>
53#include <linux/mman.h>
54#include <linux/module.h>
55#include <linux/moduleparam.h>
56#include <linux/vmalloc.h>
57#include <linux/version.h>
58#include <linux/notifier.h>
59#include <linux/cpu.h>
60#include <linux/device.h>
61
62#include <asm/abs_addr.h>
63#include <asm/ibmebus.h>
64#include <asm/io.h>
65#include <asm/pgtable.h>
66
67extern int ehca_debug_level;
68
69#define ehca_dbg(ib_dev, format, arg...) \
70 do { \
71 if (unlikely(ehca_debug_level)) \
72 dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
73 "PU%04x EHCA_DBG:%s " format "\n", \
74 get_paca()->paca_index, __FUNCTION__, \
75 ## arg); \
76 } while (0)
77
78#define ehca_info(ib_dev, format, arg...) \
79 dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
80 get_paca()->paca_index, __FUNCTION__, ## arg)
81
82#define ehca_warn(ib_dev, format, arg...) \
83 dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
84 get_paca()->paca_index, __FUNCTION__, ## arg)
85
86#define ehca_err(ib_dev, format, arg...) \
87 dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
88 get_paca()->paca_index, __FUNCTION__, ## arg)
89
90/* use this one only if no ib_dev available */
91#define ehca_gen_dbg(format, arg...) \
92 do { \
93 if (unlikely(ehca_debug_level)) \
94 printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\
95 get_paca()->paca_index, __FUNCTION__, ## arg); \
96 } while (0)
97
98#define ehca_gen_warn(format, arg...) \
99 do { \
100 if (unlikely(ehca_debug_level)) \
101 printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\
102 get_paca()->paca_index, __FUNCTION__, ## arg); \
103 } while (0)
104
105#define ehca_gen_err(format, arg...) \
106 printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
107 get_paca()->paca_index, __FUNCTION__, ## arg)
108
109/**
110 * ehca_dmp - printk a memory block, whose length is n*8 bytes.
111 * Each line has the following layout:
112 * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
113 */
114#define ehca_dmp(adr, len, format, args...) \
115 do { \
116 unsigned int x; \
117 unsigned int l = (unsigned int)(len); \
118 unsigned char *deb = (unsigned char*)(adr); \
119 for (x = 0; x < l; x += 16) { \
120 printk("EHCA_DMP:%s" format \
121 " adr=%p ofs=%04x %016lx %016lx\n", \
122 __FUNCTION__, ##args, deb, x, \
123 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
124 deb += 16; \
125 } \
126 } while (0)
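/*
 * Editor's usage sketch (not part of the original header): a call like
 *
 *	ehca_dmp(wqe, 32, "qp_num=%x", qp_num);
 *
 * prints one line per 16 bytes, each of the form
 *	EHCA_DMP:<function>qp_num=<n> adr=<chunk address> ofs=<offset> <8 bytes hex> <8 bytes hex>
 */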
127
128/* define a bitmask, little endian version */
129#define EHCA_BMASK(pos,length) (((pos)<<16)+(length))
130
131/* define a bitmask, the ibm way... */
132#define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1))
133
134/* internal function, don't use */
135#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff)
136
137/* internal function, don't use */
138#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff))
139
140/**
141 * EHCA_BMASK_SET - return value shifted and masked by mask
142 * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable
143 * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask
144 * in variable
145 */
146#define EHCA_BMASK_SET(mask,value) \
147 ((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask))
148
149/**
150 * EHCA_BMASK_GET - extract a parameter from value by mask
151 */
152#define EHCA_BMASK_GET(mask,value) \
153 (EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask)))
154
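/*
 * Editor's usage sketch (not part of the original header), illustrating how
 * the SET/GET helpers above round-trip a value; EXAMPLE_FIELD is a
 * hypothetical mask covering an 8-bit field at IBM bit positions 8..15.
 */
#if 0 /* illustration only */
#define EXAMPLE_FIELD EHCA_BMASK_IBM(8, 15)

static inline u64 ehca_bmask_example(void)
{
	u64 reg = 0;

	reg |= EHCA_BMASK_SET(EXAMPLE_FIELD, 0x5a);	/* pack 0x5a into the field */
	return EHCA_BMASK_GET(EXAMPLE_FIELD, reg);	/* yields 0x5a again */
}
#endif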
155
156/* Converts ehca to ib return code */
157static inline int ehca2ib_return_code(u64 ehca_rc)
158{
159 switch (ehca_rc) {
160 case H_SUCCESS:
161 return 0;
162 case H_BUSY:
163 return -EBUSY;
164 case H_NO_MEM:
165 return -ENOMEM;
166 default:
167 return -EINVAL;
168 }
169}
170
171
172#endif /* EHCA_TOOLS_H */
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
new file mode 100644
index 000000000000..e08764e4aef2
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -0,0 +1,392 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * userspace support verbs
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Heiko J Schick <schickhj@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#include <asm/current.h>
44
45#include "ehca_classes.h"
46#include "ehca_iverbs.h"
47#include "ehca_mrmw.h"
48#include "ehca_tools.h"
49#include "hcp_if.h"
50
51struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
52 struct ib_udata *udata)
53{
54 struct ehca_ucontext *my_context;
55
56 my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
57 if (!my_context) {
58 ehca_err(device, "Out of memory device=%p", device);
59 return ERR_PTR(-ENOMEM);
60 }
61
62 return &my_context->ib_ucontext;
63}
64
65int ehca_dealloc_ucontext(struct ib_ucontext *context)
66{
67 kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
68 return 0;
69}
70
71struct page *ehca_nopage(struct vm_area_struct *vma,
72 unsigned long address, int *type)
73{
74 struct page *mypage = NULL;
75 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
76 u32 idr_handle = fileoffset >> 32;
77 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
78 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
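	/*
	 * editor's note, derived from the shifts above: the mmap offset packs
	 * the idr handle into bits 63..32, the queue type (CQ/QP) into bits
	 * 31..28 and the resource type (fw handle or queue area) into bits
	 * 27..24
	 */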
79 u32 cur_pid = current->tgid;
80 unsigned long flags;
81 struct ehca_cq *cq;
82 struct ehca_qp *qp;
83 struct ehca_pd *pd;
84 u64 offset;
85 void *vaddr;
86
87 switch (q_type) {
88 case 1: /* CQ */
89 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
90 cq = idr_find(&ehca_cq_idr, idr_handle);
91 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
92
93 /* make sure this mmap really belongs to the authorized user */
94 if (!cq) {
95 ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS");
96 return NOPAGE_SIGBUS;
97 }
98
99 if (cq->ownpid != cur_pid) {
100 ehca_err(cq->ib_cq.device,
101 "Invalid caller pid=%x ownpid=%x",
102 cur_pid, cq->ownpid);
103 return NOPAGE_SIGBUS;
104 }
105
106 if (rsrc_type == 2) {
107 ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq);
108 offset = address - vma->vm_start;
109 vaddr = ipz_qeit_calc(&cq->ipz_queue, offset);
110 ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p",
111 offset, vaddr);
112 mypage = virt_to_page(vaddr);
113 }
114 break;
115
116 case 2: /* QP */
117 spin_lock_irqsave(&ehca_qp_idr_lock, flags);
118 qp = idr_find(&ehca_qp_idr, idr_handle);
119 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
120
121 /* make sure this mmap really belongs to the authorized user */
122 if (!qp) {
123 ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS");
124 return NOPAGE_SIGBUS;
125 }
126
127 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
128 if (pd->ownpid != cur_pid) {
129 ehca_err(qp->ib_qp.device,
130 "Invalid caller pid=%x ownpid=%x",
131 cur_pid, pd->ownpid);
132 return NOPAGE_SIGBUS;
133 }
134
135 if (rsrc_type == 2) { /* rqueue */
136 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp);
137 offset = address - vma->vm_start;
138 vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset);
139 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
140 offset, vaddr);
141 mypage = virt_to_page(vaddr);
142 } else if (rsrc_type == 3) { /* squeue */
143 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp);
144 offset = address - vma->vm_start;
145 vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
146 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
147 offset, vaddr);
148 mypage = virt_to_page(vaddr);
149 }
150 break;
151
152 default:
153 ehca_gen_err("bad queue type %x", q_type);
154 return NOPAGE_SIGBUS;
155 }
156
157 if (!mypage) {
158 ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
159 return NOPAGE_SIGBUS;
160 }
161 get_page(mypage);
162
163 return mypage;
164}
165
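The nopage handler above and ehca_mmap() below both decode the mmap file offset into an idr handle (bits 63..32), a queue type (bits 31..28) and a resource type (bits 27..24). A minimal stand-alone sketch of that encoding, assuming only the field layout implied by the shifts above (ehca_compose_fileoffset() is a made-up helper for illustration, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: compose a byte offset the way ehca_nopage()/ehca_mmap()
 * decode it. ehca_compose_fileoffset() is a hypothetical helper, not a
 * function of the driver.
 */
static uint64_t ehca_compose_fileoffset(uint32_t idr_handle, uint32_t q_type,
					uint32_t rsrc_type)
{
	return ((uint64_t)idr_handle << 32) |
	       ((uint64_t)(q_type & 0xF) << 28) |
	       ((uint64_t)(rsrc_type & 0xF) << 24);
}

int main(void)
{
	/* queue type 2 (QP), resource type 3 (send queue), idr handle 0x2a */
	uint64_t off = ehca_compose_fileoffset(0x2a, 2, 3);

	/* decode with the same shifts used by the driver above */
	printf("idr=%u q_type=%u rsrc=%u\n",
	       (uint32_t)(off >> 32),
	       (uint32_t)((off >> 28) & 0xF),
	       (uint32_t)((off >> 24) & 0xF));
	return 0;
}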
166static struct vm_operations_struct ehcau_vm_ops = {
167 .nopage = ehca_nopage,
168};
169
170int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
171{
172 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
173 u32 idr_handle = fileoffset >> 32;
174 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */
175 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
176 u32 cur_pid = current->tgid;
177 u32 ret;
178 u64 vsize, physical;
179 unsigned long flags;
180 struct ehca_cq *cq;
181 struct ehca_qp *qp;
182 struct ehca_pd *pd;
183
184 switch (q_type) {
185 case 1: /* CQ */
186 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
187 cq = idr_find(&ehca_cq_idr, idr_handle);
188 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
189
190 /* make sure this mmap really belongs to the authorized user */
191 if (!cq)
192 return -EINVAL;
193
194 if (cq->ownpid != cur_pid) {
195 ehca_err(cq->ib_cq.device,
196 "Invalid caller pid=%x ownpid=%x",
197 cur_pid, cq->ownpid);
198 return -ENOMEM;
199 }
200
201 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
202 return -EINVAL;
203
204 switch (rsrc_type) {
205 case 1: /* galpa fw handle */
206 ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq);
207 vma->vm_flags |= VM_RESERVED;
208 vsize = vma->vm_end - vma->vm_start;
209 if (vsize != EHCA_PAGESIZE) {
210 ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
211 vma->vm_end - vma->vm_start);
212 return -EINVAL;
213 }
214
215 physical = cq->galpas.user.fw_handle;
216 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
217 vma->vm_flags |= VM_IO | VM_RESERVED;
218
219 ehca_dbg(cq->ib_cq.device,
220 "vsize=%lx physical=%lx", vsize, physical);
221 ret = remap_pfn_range(vma, vma->vm_start,
222 physical >> PAGE_SHIFT, vsize,
223 vma->vm_page_prot);
224 if (ret) {
225 ehca_err(cq->ib_cq.device,
226 "remap_pfn_range() failed ret=%x",
227 ret);
228 return -ENOMEM;
229 }
230 break;
231
232 case 2: /* cq queue_addr */
233 ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
234 vma->vm_flags |= VM_RESERVED;
235 vma->vm_ops = &ehcau_vm_ops;
236 break;
237
238 default:
239 ehca_err(cq->ib_cq.device, "bad resource type %x",
240 rsrc_type);
241 return -EINVAL;
242 }
243 break;
244
245 case 2: /* QP */
246 spin_lock_irqsave(&ehca_qp_idr_lock, flags);
247 qp = idr_find(&ehca_qp_idr, idr_handle);
248 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
249
250 /* make sure this mmap really belongs to the authorized user */
251 if (!qp)
252 return -EINVAL;
253
254 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
255 if (pd->ownpid != cur_pid) {
256 ehca_err(qp->ib_qp.device,
257 "Invalid caller pid=%x ownpid=%x",
258 cur_pid, pd->ownpid);
259 return -ENOMEM;
260 }
261
262 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
263 return -EINVAL;
264
265 switch (rsrc_type) {
266 case 1: /* galpa fw handle */
267 ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp);
268 vma->vm_flags |= VM_RESERVED;
269 vsize = vma->vm_end - vma->vm_start;
270 if (vsize != EHCA_PAGESIZE) {
271 ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
272 vma->vm_end - vma->vm_start);
273 return -EINVAL;
274 }
275
276 physical = qp->galpas.user.fw_handle;
277 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
278 vma->vm_flags |= VM_IO | VM_RESERVED;
279
280 ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
281 vsize, physical);
282 ret = remap_pfn_range(vma, vma->vm_start,
283 physical >> PAGE_SHIFT, vsize,
284 vma->vm_page_prot);
285 if (ret) {
286 ehca_err(qp->ib_qp.device,
287 "remap_pfn_range() failed ret=%x",
288 ret);
289 return -ENOMEM;
290 }
291 break;
292
293 case 2: /* qp rqueue_addr */
294 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
295 vma->vm_flags |= VM_RESERVED;
296 vma->vm_ops = &ehcau_vm_ops;
297 break;
298
299 case 3: /* qp squeue_addr */
300 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
301 vma->vm_flags |= VM_RESERVED;
302 vma->vm_ops = &ehcau_vm_ops;
303 break;
304
305 default:
306 ehca_err(qp->ib_qp.device, "bad resource type %x",
307 rsrc_type);
308 return -EINVAL;
309 }
310 break;
311
312 default:
313 ehca_gen_err("bad queue type %x", q_type);
314 return -EINVAL;
315 }
316
317 return 0;
318}
319
320int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
321 struct vm_area_struct **vma)
322{
323 down_write(&current->mm->mmap_sem);
324 *mapped = (void*)do_mmap(NULL, 0, length, PROT_WRITE,
325 MAP_SHARED | MAP_ANONYMOUS,
326 foffset);
327 up_write(&current->mm->mmap_sem);
328 if (!(*mapped)) {
329 ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
330 foffset, length);
331 return -EINVAL;
332 }
333
334 *vma = find_vma(current->mm, (u64)*mapped);
335 if (!(*vma)) {
336 down_write(&current->mm->mmap_sem);
337 do_munmap(current->mm, 0, length);
338 up_write(&current->mm->mmap_sem);
339 ehca_gen_err("couldn't find vma queue=%p", *mapped);
340 return -EINVAL;
341 }
342 (*vma)->vm_flags |= VM_RESERVED;
343 (*vma)->vm_ops = &ehcau_vm_ops;
344
345 return 0;
346}
347
348int ehca_mmap_register(u64 physical, void **mapped,
349 struct vm_area_struct **vma)
350{
351 int ret;
352 unsigned long vsize;
353 /* ehca hw supports only 4k page */
354 ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
355 if (ret) {
356 ehca_gen_err("couldn't mmap physical=%lx", physical);
357 return ret;
358 }
359
360 (*vma)->vm_flags |= VM_RESERVED;
361 vsize = (*vma)->vm_end - (*vma)->vm_start;
362 if (vsize != EHCA_PAGESIZE) {
363 ehca_gen_err("invalid vsize=%lx",
364 (*vma)->vm_end - (*vma)->vm_start);
365 return -EINVAL;
366 }
367
368 (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
369 (*vma)->vm_flags |= VM_IO | VM_RESERVED;
370
371 ret = remap_pfn_range((*vma), (*vma)->vm_start,
372 physical >> PAGE_SHIFT, vsize,
373 (*vma)->vm_page_prot);
374 if (ret) {
375 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
376 return -ENOMEM;
377 }
378
379 return 0;
380
381}
382
383int ehca_munmap(unsigned long addr, size_t len) {
384 int ret = 0;
385 struct mm_struct *mm = current->mm;
386 if (mm) {
387 down_write(&mm->mmap_sem);
388 ret = do_munmap(mm, addr, len);
389 up_write(&mm->mmap_sem);
390 }
391 return ret;
392}
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
new file mode 100644
index 000000000000..3fb46e67df87
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -0,0 +1,874 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware Infiniband Interface code for POWER
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Gerd Bayer <gerd.bayer@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#include <asm/hvcall.h>
45#include "ehca_tools.h"
46#include "hcp_if.h"
47#include "hcp_phyp.h"
48#include "hipz_fns.h"
49#include "ipz_pt_fn.h"
50
51#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
52#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
53#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
54#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
55#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
56#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
57#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
58#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
59
60#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
61#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
62#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
63#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
64
65#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
66#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
67#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
68#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
69
70#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
71#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
72
73/* direct access qp controls */
74#define DAQP_CTRL_ENABLE 0x01
75#define DAQP_CTRL_SEND_COMP 0x20
76#define DAQP_CTRL_RECV_COMP 0x40
77
78static u32 get_longbusy_msecs(int longbusy_rc)
79{
80 switch (longbusy_rc) {
81 case H_LONG_BUSY_ORDER_1_MSEC:
82 return 1;
83 case H_LONG_BUSY_ORDER_10_MSEC:
84 return 10;
85 case H_LONG_BUSY_ORDER_100_MSEC:
86 return 100;
87 case H_LONG_BUSY_ORDER_1_SEC:
88 return 1000;
89 case H_LONG_BUSY_ORDER_10_SEC:
90 return 10000;
91 case H_LONG_BUSY_ORDER_100_SEC:
92 return 100000;
93 default:
94 return 1;
95 }
96}
97
98static long ehca_plpar_hcall_norets(unsigned long opcode,
99 unsigned long arg1,
100 unsigned long arg2,
101 unsigned long arg3,
102 unsigned long arg4,
103 unsigned long arg5,
104 unsigned long arg6,
105 unsigned long arg7)
106{
107 long ret;
108 int i, sleep_msecs;
109
110 ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
111 "arg5=%lx arg6=%lx arg7=%lx",
112 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
113
114 for (i = 0; i < 5; i++) {
115 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
116 arg5, arg6, arg7);
117
118 if (H_IS_LONG_BUSY(ret)) {
119 sleep_msecs = get_longbusy_msecs(ret);
120 msleep_interruptible(sleep_msecs);
121 continue;
122 }
123
124 if (ret < H_SUCCESS)
125 ehca_gen_err("opcode=%lx ret=%lx"
126 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
127 " arg5=%lx arg6=%lx arg7=%lx ",
128 opcode, ret,
129 arg1, arg2, arg3, arg4, arg5,
130 arg6, arg7);
131
132 ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret);
133 return ret;
134
135 }
136
137 return H_BUSY;
138}
139
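The same bounded retry policy reappears in ehca_plpar_hcall9() below: on an H_IS_LONG_BUSY() return the firmware supplies a sleep-time hint, the wrapper sleeps and retries up to five times, and only then gives up with H_BUSY. A stand-alone sketch of that policy, with simplified names and return codes that are purely illustrative and not the real hcall constants:

#include <stdio.h>

/* Illustrative model of the retry loop above: a "long busy" return carries a
 * sleep hint, the wrapper retries at most five times, then reports busy.
 */
enum { EX_SUCCESS = 0, EX_BUSY = -1, EX_LONG_BUSY_10_MSEC = -10 };

static int fake_hcall(int *busy_left)
{
	return (*busy_left)-- > 0 ? EX_LONG_BUSY_10_MSEC : EX_SUCCESS;
}

int main(void)
{
	int busy_left = 3;	/* firmware answers "long busy" three times */
	int i, slept_ms = 0, ret = EX_BUSY;

	for (i = 0; i < 5; i++) {
		ret = fake_hcall(&busy_left);
		if (ret == EX_LONG_BUSY_10_MSEC) {
			slept_ms += 10;	/* msleep_interruptible() in the driver */
			ret = EX_BUSY;	/* value seen if retries run out */
			continue;
		}
		break;
	}
	printf("ret=%d after sleeping %d ms\n", ret, slept_ms);
	return 0;
}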
140static long ehca_plpar_hcall9(unsigned long opcode,
141 unsigned long *outs, /* array of 9 outputs */
142 unsigned long arg1,
143 unsigned long arg2,
144 unsigned long arg3,
145 unsigned long arg4,
146 unsigned long arg5,
147 unsigned long arg6,
148 unsigned long arg7,
149 unsigned long arg8,
150 unsigned long arg9)
151{
152 long ret;
153 int i, sleep_msecs;
154
155 ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
156 "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
157 opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
158 arg8, arg9);
159
160 for (i = 0; i < 5; i++) {
161 ret = plpar_hcall9(opcode, outs,
162 arg1, arg2, arg3, arg4, arg5,
163 arg6, arg7, arg8, arg9);
164
165 if (H_IS_LONG_BUSY(ret)) {
166 sleep_msecs = get_longbusy_msecs(ret);
167 msleep_interruptible(sleep_msecs);
168 continue;
169 }
170
171 if (ret < H_SUCCESS)
172 ehca_gen_err("opcode=%lx ret=%lx"
173 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
174 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
175 " arg9=%lx"
176 " out1=%lx out2=%lx out3=%lx out4=%lx"
177 " out5=%lx out6=%lx out7=%lx out8=%lx"
178 " out9=%lx",
179 opcode, ret,
180 arg1, arg2, arg3, arg4, arg5,
181 arg6, arg7, arg8, arg9,
182 outs[0], outs[1], outs[2], outs[3],
183 outs[4], outs[5], outs[6], outs[7],
184 outs[8]);
185
186 ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx "
187 "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx "
188 "out9=%lx",
189 opcode, ret, outs[0], outs[1], outs[2], outs[3],
190 outs[4], outs[5], outs[6], outs[7], outs[8]);
191 return ret;
192
193 }
194
195 return H_BUSY;
196}
197u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
198 struct ehca_pfeq *pfeq,
199 const u32 neq_control,
200 const u32 number_of_entries,
201 struct ipz_eq_handle *eq_handle,
202 u32 *act_nr_of_entries,
203 u32 *act_pages,
204 u32 *eq_ist)
205{
206 u64 ret;
207 u64 outs[PLPAR_HCALL9_BUFSIZE];
208 u64 allocate_controls;
209
210 /* resource type */
211 allocate_controls = 3ULL;
212
213 /* ISN is associated */
214 if (neq_control != 1)
215 allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
216 else /* notification event queue */
217 allocate_controls = (1ULL << 63) | allocate_controls;
218
219 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
220 adapter_handle.handle, /* r4 */
221 allocate_controls, /* r5 */
222 number_of_entries, /* r6 */
223 0, 0, 0, 0, 0, 0);
224 eq_handle->handle = outs[0];
225 *act_nr_of_entries = (u32)outs[3];
226 *act_pages = (u32)outs[4];
227 *eq_ist = (u32)outs[5];
228
229 if (ret == H_NOT_ENOUGH_RESOURCES)
230 ehca_gen_err("Not enough resources. ret=%lx", ret);
231
232 return ret;
233}
234
235u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
236 struct ipz_eq_handle eq_handle,
237 const u64 event_mask)
238{
239 return ehca_plpar_hcall_norets(H_RESET_EVENTS,
240 adapter_handle.handle, /* r4 */
241 eq_handle.handle, /* r5 */
242 event_mask, /* r6 */
243 0, 0, 0, 0);
244}
245
246u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
247 struct ehca_cq *cq,
248 struct ehca_alloc_cq_parms *param)
249{
250 u64 ret;
251 u64 outs[PLPAR_HCALL9_BUFSIZE];
252
253 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
254 adapter_handle.handle, /* r4 */
255 2, /* r5 */
256 param->eq_handle.handle, /* r6 */
257 cq->token, /* r7 */
258 param->nr_cqe, /* r8 */
259 0, 0, 0, 0);
260 cq->ipz_cq_handle.handle = outs[0];
261 param->act_nr_of_entries = (u32)outs[3];
262 param->act_pages = (u32)outs[4];
263
264 if (ret == H_SUCCESS)
265 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
266
267 if (ret == H_NOT_ENOUGH_RESOURCES)
268 ehca_gen_err("Not enough resources. ret=%lx", ret);
269
270 return ret;
271}
272
273u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
274 struct ehca_qp *qp,
275 struct ehca_alloc_qp_parms *parms)
276{
277 u64 ret;
278 u64 allocate_controls;
279 u64 max_r10_reg;
280 u64 outs[PLPAR_HCALL9_BUFSIZE];
281 u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
282 u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
283 int daqp_ctrl = parms->daqp_ctrl;
284
285 allocate_controls =
286 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS,
287 (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
288 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
289 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
290 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
291 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
292 (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0)
293 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
294 (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0)
295 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
296 parms->ud_av_l_key_ctl)
297 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
298
299 max_r10_reg =
300 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
301 max_nr_send_wqes)
302 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
303 max_nr_receive_wqes)
304 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
305 parms->max_send_sge)
306 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
307 parms->max_recv_sge);
308
309 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
310 adapter_handle.handle, /* r4 */
311 allocate_controls, /* r5 */
312 qp->send_cq->ipz_cq_handle.handle,
313 qp->recv_cq->ipz_cq_handle.handle,
314 parms->ipz_eq_handle.handle,
315 ((u64)qp->token << 32) | parms->pd.value,
316 max_r10_reg, /* r10 */
317 parms->ud_av_l_key_ctl, /* r11 */
318 0);
319 qp->ipz_qp_handle.handle = outs[0];
320 qp->real_qp_num = (u32)outs[1];
321 parms->act_nr_send_wqes =
322 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
323 parms->act_nr_recv_wqes =
324 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
325 parms->act_nr_send_sges =
326 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
327 parms->act_nr_recv_sges =
328 (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
329 parms->nr_sq_pages =
330 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
331 parms->nr_rq_pages =
332 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
333
334 if (ret == H_SUCCESS)
335 hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]);
336
337 if (ret == H_NOT_ENOUGH_RESOURCES)
338 ehca_gen_err("Not enough resources. ret=%lx", ret);
339
340 return ret;
341}
342
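The H_ALL_RES_QP_* field definitions at the top of this file use IBM bit numbering, where bit 0 is the most significant bit of the 64-bit register, so a field described as bits 56..63 lands in the low byte. A stand-alone sketch of that packing (set_bits64_ibm() is an illustrative stand-in under that assumption, not the EHCA_BMASK_* machinery from ehca_tools.h):

#include <stdint.h>
#include <stdio.h>

/* Place 'value' into IBM bits from..to of a 64-bit word (bit 0 = MSB).
 * Illustrative stand-in for EHCA_BMASK_IBM()/EHCA_BMASK_SET().
 */
static uint64_t set_bits64_ibm(unsigned int from, unsigned int to,
			       uint64_t value)
{
	unsigned int width = to - from + 1;
	unsigned int shift = 63 - to;	/* distance of the field's LSB from bit 63 */
	uint64_t mask = (width >= 64) ? ~0ULL : (1ULL << width) - 1;

	return (value & mask) << shift;
}

int main(void)
{
	uint64_t controls = 0;

	/* H_ALL_RES_QP_RESOURCE_TYPE, IBM bits 56..63: the low byte */
	controls |= set_bits64_ibm(56, 63, 1);
	/* H_ALL_RES_QP_SERVICE_TYPE, IBM bits 13..15 */
	controls |= set_bits64_ibm(13, 15, 3);

	/* prints 0x0003000000000001 */
	printf("allocate_controls=0x%016llx\n", (unsigned long long)controls);
	return 0;
}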
343u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
344 const u8 port_id,
345 struct hipz_query_port *query_port_response_block)
346{
347 u64 ret;
348 u64 r_cb = virt_to_abs(query_port_response_block);
349
350 if (r_cb & (EHCA_PAGESIZE-1)) {
351 ehca_gen_err("response block not page aligned");
352 return H_PARAMETER;
353 }
354
355 ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
356 adapter_handle.handle, /* r4 */
357 port_id, /* r5 */
358 r_cb, /* r6 */
359 0, 0, 0, 0);
360
361 if (ehca_debug_level)
362 ehca_dmp(query_port_response_block, 64, "response_block");
363
364 return ret;
365}
366
367u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
368 struct hipz_query_hca *query_hca_rblock)
369{
370 u64 r_cb = virt_to_abs(query_hca_rblock);
371
372 if (r_cb & (EHCA_PAGESIZE-1)) {
373 ehca_gen_err("response_block=%p not page aligned",
374 query_hca_rblock);
375 return H_PARAMETER;
376 }
377
378 return ehca_plpar_hcall_norets(H_QUERY_HCA,
379 adapter_handle.handle, /* r4 */
380 r_cb, /* r5 */
381 0, 0, 0, 0, 0);
382}
383
384u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
385 const u8 pagesize,
386 const u8 queue_type,
387 const u64 resource_handle,
388 const u64 logical_address_of_page,
389 u64 count)
390{
391 return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
392 adapter_handle.handle, /* r4 */
393 queue_type | pagesize << 8, /* r5 */
394 resource_handle, /* r6 */
395 logical_address_of_page, /* r7 */
396 count, /* r8 */
397 0, 0);
398}
399
400u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
401 const struct ipz_eq_handle eq_handle,
402 struct ehca_pfeq *pfeq,
403 const u8 pagesize,
404 const u8 queue_type,
405 const u64 logical_address_of_page,
406 const u64 count)
407{
408 if (count != 1) {
409 ehca_gen_err("Page counter=%lx", count);
410 return H_PARAMETER;
411 }
412 return hipz_h_register_rpage(adapter_handle,
413 pagesize,
414 queue_type,
415 eq_handle.handle,
416 logical_address_of_page, count);
417}
418
419u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
420 u32 ist)
421{
422 u64 ret;
423 ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
424 adapter_handle.handle, /* r4 */
425 ist, /* r5 */
426 0, 0, 0, 0, 0);
427
428 if (ret != H_SUCCESS && ret != H_BUSY)
429 ehca_gen_err("Could not query interrupt state.");
430
431 return ret;
432}
433
434u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
435 const struct ipz_cq_handle cq_handle,
436 struct ehca_pfcq *pfcq,
437 const u8 pagesize,
438 const u8 queue_type,
439 const u64 logical_address_of_page,
440 const u64 count,
441 const struct h_galpa gal)
442{
443 if (count != 1) {
444 ehca_gen_err("Page counter=%lx", count);
445 return H_PARAMETER;
446 }
447
448 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
449 cq_handle.handle, logical_address_of_page,
450 count);
451}
452
453u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
454 const struct ipz_qp_handle qp_handle,
455 struct ehca_pfqp *pfqp,
456 const u8 pagesize,
457 const u8 queue_type,
458 const u64 logical_address_of_page,
459 const u64 count,
460 const struct h_galpa galpa)
461{
462 if (count != 1) {
463 ehca_gen_err("Page counter=%lx", count);
464 return H_PARAMETER;
465 }
466
467 return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
468 qp_handle.handle, logical_address_of_page,
469 count);
470}
471
472u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
473 const struct ipz_qp_handle qp_handle,
474 struct ehca_pfqp *pfqp,
475 void **log_addr_next_sq_wqe2processed,
476 void **log_addr_next_rq_wqe2processed,
477 int dis_and_get_function_code)
478{
479 u64 ret;
480 u64 outs[PLPAR_HCALL9_BUFSIZE];
481
482 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
483 adapter_handle.handle, /* r4 */
484 dis_and_get_function_code, /* r5 */
485 qp_handle.handle, /* r6 */
486 0, 0, 0, 0, 0, 0);
487 if (log_addr_next_sq_wqe2processed)
488 *log_addr_next_sq_wqe2processed = (void*)outs[0];
489 if (log_addr_next_rq_wqe2processed)
490 *log_addr_next_rq_wqe2processed = (void*)outs[1];
491
492 return ret;
493}
494
495u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
496 const struct ipz_qp_handle qp_handle,
497 struct ehca_pfqp *pfqp,
498 const u64 update_mask,
499 struct hcp_modify_qp_control_block *mqpcb,
500 struct h_galpa gal)
501{
502 u64 ret;
503 u64 outs[PLPAR_HCALL9_BUFSIZE];
504 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
505 adapter_handle.handle, /* r4 */
506 qp_handle.handle, /* r5 */
507 update_mask, /* r6 */
508 virt_to_abs(mqpcb), /* r7 */
509 0, 0, 0, 0, 0);
510
511 if (ret == H_NOT_ENOUGH_RESOURCES)
512 ehca_gen_err("Insufficient resources ret=%lx", ret);
513
514 return ret;
515}
516
517u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
518 const struct ipz_qp_handle qp_handle,
519 struct ehca_pfqp *pfqp,
520 struct hcp_modify_qp_control_block *qqpcb,
521 struct h_galpa gal)
522{
523 return ehca_plpar_hcall_norets(H_QUERY_QP,
524 adapter_handle.handle, /* r4 */
525 qp_handle.handle, /* r5 */
526 virt_to_abs(qqpcb), /* r6 */
527 0, 0, 0, 0);
528}
529
530u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
531 struct ehca_qp *qp)
532{
533 u64 ret;
534 u64 outs[PLPAR_HCALL9_BUFSIZE];
535
536 ret = hcp_galpas_dtor(&qp->galpas);
537 if (ret) {
538 ehca_gen_err("Could not destruct qp->galpas");
539 return H_RESOURCE;
540 }
541 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
542 adapter_handle.handle, /* r4 */
543 /* function code */
544 1, /* r5 */
545 qp->ipz_qp_handle.handle, /* r6 */
546 0, 0, 0, 0, 0, 0);
547 if (ret == H_HARDWARE)
548 ehca_gen_err("HCA not operational. ret=%lx", ret);
549
550 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
551 adapter_handle.handle, /* r4 */
552 qp->ipz_qp_handle.handle, /* r5 */
553 0, 0, 0, 0, 0);
554
555 if (ret == H_RESOURCE)
556 ehca_gen_err("Resource still in use. ret=%lx", ret);
557
558 return ret;
559}
560
561u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
562 const struct ipz_qp_handle qp_handle,
563 struct h_galpa gal,
564 u32 port)
565{
566 return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
567 adapter_handle.handle, /* r4 */
568 qp_handle.handle, /* r5 */
569 port, /* r6 */
570 0, 0, 0, 0);
571}
572
573u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
574 const struct ipz_qp_handle qp_handle,
575 struct h_galpa gal,
576 u32 port, u32 * pma_qp_nr,
577 u32 * bma_qp_nr)
578{
579 u64 ret;
580 u64 outs[PLPAR_HCALL9_BUFSIZE];
581
582 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
583 adapter_handle.handle, /* r4 */
584 qp_handle.handle, /* r5 */
585 port, /* r6 */
586 0, 0, 0, 0, 0, 0);
587 *pma_qp_nr = (u32)outs[0];
588 *bma_qp_nr = (u32)outs[1];
589
590 if (ret == H_ALIAS_EXIST)
591 ehca_gen_err("AQP1 already exists. ret=%lx", ret);
592
593 return ret;
594}
595
596u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
597 const struct ipz_qp_handle qp_handle,
598 struct h_galpa gal,
599 u16 mcg_dlid,
600 u64 subnet_prefix, u64 interface_id)
601{
602 u64 ret;
603
604 ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
605 adapter_handle.handle, /* r4 */
606 qp_handle.handle, /* r5 */
607 mcg_dlid, /* r6 */
608 interface_id, /* r7 */
609 subnet_prefix, /* r8 */
610 0, 0);
611
612 if (ret == H_NOT_ENOUGH_RESOURCES)
613 ehca_gen_err("Not enough resources. ret=%lx", ret);
614
615 return ret;
616}
617
618u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
619 const struct ipz_qp_handle qp_handle,
620 struct h_galpa gal,
621 u16 mcg_dlid,
622 u64 subnet_prefix, u64 interface_id)
623{
624 return ehca_plpar_hcall_norets(H_DETACH_MCQP,
625 adapter_handle.handle, /* r4 */
626 qp_handle.handle, /* r5 */
627 mcg_dlid, /* r6 */
628 interface_id, /* r7 */
629 subnet_prefix, /* r8 */
630 0, 0);
631}
632
633u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
634 struct ehca_cq *cq,
635 u8 force_flag)
636{
637 u64 ret;
638
639 ret = hcp_galpas_dtor(&cq->galpas);
640 if (ret) {
641 ehca_gen_err("Could not destruct cq->galpas");
642 return H_RESOURCE;
643 }
644
645 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
646 adapter_handle.handle, /* r4 */
647 cq->ipz_cq_handle.handle, /* r5 */
648 force_flag != 0 ? 1L : 0L, /* r6 */
649 0, 0, 0, 0);
650
651 if (ret == H_RESOURCE)
652 ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret);
653
654 return ret;
655}
656
657u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
658 struct ehca_eq *eq)
659{
660 u64 ret;
661
662 ret = hcp_galpas_dtor(&eq->galpas);
663 if (ret) {
664 ehca_gen_err("Could not destruct eq->galpas");
665 return H_RESOURCE;
666 }
667
668 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
669 adapter_handle.handle, /* r4 */
670 eq->ipz_eq_handle.handle, /* r5 */
671 0, 0, 0, 0, 0);
672
673 if (ret == H_RESOURCE)
674 ehca_gen_err("Resource in use. ret=%lx ", ret);
675
676 return ret;
677}
678
679u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
680 const struct ehca_mr *mr,
681 const u64 vaddr,
682 const u64 length,
683 const u32 access_ctrl,
684 const struct ipz_pd pd,
685 struct ehca_mr_hipzout_parms *outparms)
686{
687 u64 ret;
688 u64 outs[PLPAR_HCALL9_BUFSIZE];
689
690 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
691 adapter_handle.handle, /* r4 */
692 5, /* r5 */
693 vaddr, /* r6 */
694 length, /* r7 */
695 (((u64)access_ctrl) << 32ULL), /* r8 */
696 pd.value, /* r9 */
697 0, 0, 0);
698 outparms->handle.handle = outs[0];
699 outparms->lkey = (u32)outs[2];
700 outparms->rkey = (u32)outs[3];
701
702 return ret;
703}
704
705u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
706 const struct ehca_mr *mr,
707 const u8 pagesize,
708 const u8 queue_type,
709 const u64 logical_address_of_page,
710 const u64 count)
711{
712 u64 ret;
713
714 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
715 ehca_gen_err("logical_address_of_page not on a 4k boundary "
716 "adapter_handle=%lx mr=%p mr_handle=%lx "
717 "pagesize=%x queue_type=%x "
718 "logical_address_of_page=%lx count=%lx",
719 adapter_handle.handle, mr,
720 mr->ipz_mr_handle.handle, pagesize, queue_type,
721 logical_address_of_page, count);
722 ret = H_PARAMETER;
723 } else
724 ret = hipz_h_register_rpage(adapter_handle, pagesize,
725 queue_type,
726 mr->ipz_mr_handle.handle,
727 logical_address_of_page, count);
728 return ret;
729}
730
731u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
732 const struct ehca_mr *mr,
733 struct ehca_mr_hipzout_parms *outparms)
734{
735 u64 ret;
736 u64 outs[PLPAR_HCALL9_BUFSIZE];
737
738 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
739 adapter_handle.handle, /* r4 */
740 mr->ipz_mr_handle.handle, /* r5 */
741 0, 0, 0, 0, 0, 0, 0);
742 outparms->len = outs[0];
743 outparms->vaddr = outs[1];
744 outparms->acl = outs[4] >> 32;
745 outparms->lkey = (u32)(outs[5] >> 32);
746 outparms->rkey = (u32)(outs[5] & (0xffffffff));
747
748 return ret;
749}
750
751u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
752 const struct ehca_mr *mr)
753{
754 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
755 adapter_handle.handle, /* r4 */
756 mr->ipz_mr_handle.handle, /* r5 */
757 0, 0, 0, 0, 0);
758}
759
760u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
761 const struct ehca_mr *mr,
762 const u64 vaddr_in,
763 const u64 length,
764 const u32 access_ctrl,
765 const struct ipz_pd pd,
766 const u64 mr_addr_cb,
767 struct ehca_mr_hipzout_parms *outparms)
768{
769 u64 ret;
770 u64 outs[PLPAR_HCALL9_BUFSIZE];
771
772 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
773 adapter_handle.handle, /* r4 */
774 mr->ipz_mr_handle.handle, /* r5 */
775 vaddr_in, /* r6 */
776 length, /* r7 */
777 /* r8 */
778 ((((u64)access_ctrl) << 32ULL) | pd.value),
779 mr_addr_cb, /* r9 */
780 0, 0, 0);
781 outparms->vaddr = outs[1];
782 outparms->lkey = (u32)outs[2];
783 outparms->rkey = (u32)outs[3];
784
785 return ret;
786}
787
788u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
789 const struct ehca_mr *mr,
790 const struct ehca_mr *orig_mr,
791 const u64 vaddr_in,
792 const u32 access_ctrl,
793 const struct ipz_pd pd,
794 struct ehca_mr_hipzout_parms *outparms)
795{
796 u64 ret;
797 u64 outs[PLPAR_HCALL9_BUFSIZE];
798
799 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
800 adapter_handle.handle, /* r4 */
801 orig_mr->ipz_mr_handle.handle, /* r5 */
802 vaddr_in, /* r6 */
803 (((u64)access_ctrl) << 32ULL), /* r7 */
804 pd.value, /* r8 */
805 0, 0, 0, 0);
806 outparms->handle.handle = outs[0];
807 outparms->lkey = (u32)outs[2];
808 outparms->rkey = (u32)outs[3];
809
810 return ret;
811}
812
813u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
814 const struct ehca_mw *mw,
815 const struct ipz_pd pd,
816 struct ehca_mw_hipzout_parms *outparms)
817{
818 u64 ret;
819 u64 outs[PLPAR_HCALL9_BUFSIZE];
820
821 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
822 adapter_handle.handle, /* r4 */
823 6, /* r5 */
824 pd.value, /* r6 */
825 0, 0, 0, 0, 0, 0);
826 outparms->handle.handle = outs[0];
827 outparms->rkey = (u32)outs[3];
828
829 return ret;
830}
831
832u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
833 const struct ehca_mw *mw,
834 struct ehca_mw_hipzout_parms *outparms)
835{
836 u64 ret;
837 u64 outs[PLPAR_HCALL9_BUFSIZE];
838
839 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
840 adapter_handle.handle, /* r4 */
841 mw->ipz_mw_handle.handle, /* r5 */
842 0, 0, 0, 0, 0, 0, 0);
843 outparms->rkey = (u32)outs[3];
844
845 return ret;
846}
847
848u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
849 const struct ehca_mw *mw)
850{
851 return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
852 adapter_handle.handle, /* r4 */
853 mw->ipz_mw_handle.handle, /* r5 */
854 0, 0, 0, 0, 0);
855}
856
857u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
858 const u64 ressource_handle,
859 void *rblock,
860 unsigned long *byte_count)
861{
862 u64 r_cb = virt_to_abs(rblock);
863
864 if (r_cb & (EHCA_PAGESIZE-1)) {
865 ehca_gen_err("rblock not page aligned.");
866 return H_PARAMETER;
867 }
868
869 return ehca_plpar_hcall_norets(H_ERROR_DATA,
870 adapter_handle.handle,
871 ressource_handle,
872 r_cb,
873 0, 0, 0, 0);
874}
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
new file mode 100644
index 000000000000..587ebd470959
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -0,0 +1,261 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware Infiniband Interface code for POWER
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Gerd Bayer <gerd.bayer@de.ibm.com>
9 * Waleri Fomin <fomin@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HCP_IF_H__
45#define __HCP_IF_H__
46
47#include "ehca_classes.h"
48#include "ehca_tools.h"
49#include "hipz_hw.h"
50
51/*
52 * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
53 * resources and creates the empty EQPT (ring).
54 */
55u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
56 struct ehca_pfeq *pfeq,
57 const u32 neq_control,
58 const u32 number_of_entries,
59 struct ipz_eq_handle *eq_handle,
60 u32 * act_nr_of_entries,
61 u32 * act_pages,
62 u32 * eq_ist);
63
64u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
65 struct ipz_eq_handle eq_handle,
66 const u64 event_mask);
67/*
68 * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
69 * resources and creates the empty CQPT (ring).
70 */
71u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
72 struct ehca_cq *cq,
73 struct ehca_alloc_cq_parms *param);
74
75
76/*
77 * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
78 * initializes resources and creates empty QPPTs (2 rings).
79 */
80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
81 struct ehca_qp *qp,
82 struct ehca_alloc_qp_parms *parms);
83
84u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
85 const u8 port_id,
86 struct hipz_query_port *query_port_response_block);
87
88u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
89 struct hipz_query_hca *query_hca_rblock);
90
91/*
92 * hipz_h_register_rpage is the internal function used by all
93 * H_REGISTER_RPAGES wrapper calls.
94 */
95u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
96 const u8 pagesize,
97 const u8 queue_type,
98 const u64 resource_handle,
99 const u64 logical_address_of_page,
100 u64 count);
101
102u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
103 const struct ipz_eq_handle eq_handle,
104 struct ehca_pfeq *pfeq,
105 const u8 pagesize,
106 const u8 queue_type,
107 const u64 logical_address_of_page,
108 const u64 count);
109
110u64 hipz_h_query_int_state(const struct ipz_adapter_handle
111 hcp_adapter_handle,
112 u32 ist);
113
114u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
115 const struct ipz_cq_handle cq_handle,
116 struct ehca_pfcq *pfcq,
117 const u8 pagesize,
118 const u8 queue_type,
119 const u64 logical_address_of_page,
120 const u64 count,
121 const struct h_galpa gal);
122
123u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
124 const struct ipz_qp_handle qp_handle,
125 struct ehca_pfqp *pfqp,
126 const u8 pagesize,
127 const u8 queue_type,
128 const u64 logical_address_of_page,
129 const u64 count,
130 const struct h_galpa galpa);
131
132u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
133 const struct ipz_qp_handle qp_handle,
134 struct ehca_pfqp *pfqp,
135 void **log_addr_next_sq_wqe_tb_processed,
136 void **log_addr_next_rq_wqe_tb_processed,
137 int dis_and_get_function_code);
138enum hcall_sigt {
139 HCALL_SIGT_NO_CQE = 0,
140 HCALL_SIGT_BY_WQE = 1,
141 HCALL_SIGT_EVERY = 2
142};
143
144u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
145 const struct ipz_qp_handle qp_handle,
146 struct ehca_pfqp *pfqp,
147 const u64 update_mask,
148 struct hcp_modify_qp_control_block *mqpcb,
149 struct h_galpa gal);
150
151u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
152 const struct ipz_qp_handle qp_handle,
153 struct ehca_pfqp *pfqp,
154 struct hcp_modify_qp_control_block *qqpcb,
155 struct h_galpa gal);
156
157u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
158 struct ehca_qp *qp);
159
160u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
161 const struct ipz_qp_handle qp_handle,
162 struct h_galpa gal,
163 u32 port);
164
165u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
166 const struct ipz_qp_handle qp_handle,
167 struct h_galpa gal,
168 u32 port, u32 * pma_qp_nr,
169 u32 * bma_qp_nr);
170
171u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
172 const struct ipz_qp_handle qp_handle,
173 struct h_galpa gal,
174 u16 mcg_dlid,
175 u64 subnet_prefix, u64 interface_id);
176
177u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
178 const struct ipz_qp_handle qp_handle,
179 struct h_galpa gal,
180 u16 mcg_dlid,
181 u64 subnet_prefix, u64 interface_id);
182
183u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
184 struct ehca_cq *cq,
185 u8 force_flag);
186
187u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
188 struct ehca_eq *eq);
189
190/*
191 * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
192 * initializes resources.
193 */
194u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
195 const struct ehca_mr *mr,
196 const u64 vaddr,
197 const u64 length,
198 const u32 access_ctrl,
199 const struct ipz_pd pd,
200 struct ehca_mr_hipzout_parms *outparms);
201
202/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
203u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
204 const struct ehca_mr *mr,
205 const u8 pagesize,
206 const u8 queue_type,
207 const u64 logical_address_of_page,
208 const u64 count);
209
210/* hipz_h_query_mr queries MR in HW and FW */
211u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
212 const struct ehca_mr *mr,
213 struct ehca_mr_hipzout_parms *outparms);
214
215/* hipz_h_free_resource_mr frees MR resources in HW and FW */
216u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
217 const struct ehca_mr *mr);
218
219/* hipz_h_reregister_pmr reregisters MR in HW and FW */
220u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
221 const struct ehca_mr *mr,
222 const u64 vaddr_in,
223 const u64 length,
224 const u32 access_ctrl,
225 const struct ipz_pd pd,
226 const u64 mr_addr_cb,
227 struct ehca_mr_hipzout_parms *outparms);
228
229/* hipz_h_register_smr registers a shared MR in HW and FW */
230u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
231 const struct ehca_mr *mr,
232 const struct ehca_mr *orig_mr,
233 const u64 vaddr_in,
234 const u32 access_ctrl,
235 const struct ipz_pd pd,
236 struct ehca_mr_hipzout_parms *outparms);
237
238/*
239 * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
240 * initializes resources.
241 */
242u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
243 const struct ehca_mw *mw,
244 const struct ipz_pd pd,
245 struct ehca_mw_hipzout_parms *outparms);
246
247/* hipz_h_query_mw queries MW in HW and FW */
248u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
249 const struct ehca_mw *mw,
250 struct ehca_mw_hipzout_parms *outparms);
251
252/* hipz_h_free_resource_mw frees MW resources in HW and FW */
253u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
254 const struct ehca_mw *mw);
255
256u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
257 const u64 ressource_handle,
258 void *rblock,
259 unsigned long *byte_count);
260
261#endif /* __HCP_IF_H__ */
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
new file mode 100644
index 000000000000..0b1a4772c78a
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -0,0 +1,80 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * load store abstraction for ehca register access with tracing
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include "ehca_classes.h"
43#include "hipz_hw.h"
44
45int hcall_map_page(u64 physaddr, u64 *mapaddr)
46{
47 *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
48 return 0;
49}
50
51int hcall_unmap_page(u64 mapaddr)
52{
53 iounmap((volatile void __iomem*)mapaddr);
54 return 0;
55}
56
57int hcp_galpas_ctor(struct h_galpas *galpas,
58 u64 paddr_kernel, u64 paddr_user)
59{
60 int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
61 if (ret)
62 return ret;
63
64 galpas->user.fw_handle = paddr_user;
65
66 return 0;
67}
68
69int hcp_galpas_dtor(struct h_galpas *galpas)
70{
71 if (galpas->kernel.fw_handle) {
72 int ret = hcall_unmap_page(galpas->kernel.fw_handle);
73 if (ret)
74 return ret;
75 }
76
77 galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
78
79 return 0;
80}
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
new file mode 100644
index 000000000000..5305c2a3ed94
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -0,0 +1,90 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * Firmware calls
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
8 * Waleri Fomin <fomin@de.ibm.com>
9 * Gerd Bayer <gerd.bayer@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HCP_PHYP_H__
45#define __HCP_PHYP_H__
46
47
48/*
49 * eHCA page (mapped into memory)
50 * resource to access eHCA register pages in CPU address space
51 */
52struct h_galpa {
53 u64 fw_handle;
54 /* for pSeries this is a 64-bit memory address where
55 I/O memory is mapped into CPU address space (kv) */
56};
57
58/*
59 * resource to access eHCA address space registers, all types
60 */
61struct h_galpas {
62 u32 pid; /* PID of user space process, for galpa access checking */
63 struct h_galpa user; /* user space accessible resource,
64 set to 0 if unused */
65 struct h_galpa kernel; /* kernel space accessible resource,
66 set to 0 if unused */
67};
68
69static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
70{
71 u64 addr = galpa.fw_handle + offset;
72 return *(volatile u64 __force *)addr;
73}
74
75static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
76{
77 u64 addr = galpa.fw_handle + offset;
78 *(volatile u64 __force *)addr = value;
79}
80
81int hcp_galpas_ctor(struct h_galpas *galpas,
82 u64 paddr_kernel, u64 paddr_user);
83
84int hcp_galpas_dtor(struct h_galpas *galpas);
85
86int hcall_map_page(u64 physaddr, u64 * mapaddr);
87
88int hcall_unmap_page(u64 mapaddr);
89
90#endif
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/infiniband/hw/ehca/hipz_fns.h
new file mode 100644
index 000000000000..9dac93d02140
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hipz_fns.h
@@ -0,0 +1,68 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HW abstraction register functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#ifndef __HIPZ_FNS_H__
43#define __HIPZ_FNS_H__
44
45#include "ehca_classes.h"
46#include "hipz_hw.h"
47
48#include "hipz_fns_core.h"
49
50#define hipz_galpa_store_eq(gal, offset, value) \
51 hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
52
53#define hipz_galpa_load_eq(gal, offset) \
54 hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
55
56#define hipz_galpa_store_qped(gal, offset, value) \
57 hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
58
59#define hipz_galpa_load_qped(gal, offset) \
60 hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
61
62#define hipz_galpa_store_mrmw(gal, offset, value) \
63 hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
64
65#define hipz_galpa_load_mrmw(gal, offset) \
66 hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
67
68#endif
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h
new file mode 100644
index 000000000000..20898a153446
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h
@@ -0,0 +1,100 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * HW abstraction register functions
5 *
6 * Authors: Christoph Raisch <raisch@de.ibm.com>
7 * Heiko J Schick <schickhj@de.ibm.com>
8 * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9 * Reinhard Ernst <rernst@de.ibm.com>
10 *
11 * Copyright (c) 2005 IBM Corporation
12 *
13 * All rights reserved.
14 *
15 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
16 * BSD.
17 *
18 * OpenIB BSD License
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions are met:
22 *
23 * Redistributions of source code must retain the above copyright notice, this
24 * list of conditions and the following disclaimer.
25 *
26 * Redistributions in binary form must reproduce the above copyright notice,
27 * this list of conditions and the following disclaimer in the documentation
28 * and/or other materials
29 * provided with the distribution.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
32 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
35 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
36 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
37 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
38 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
39 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGE.
42 */
43
44#ifndef __HIPZ_FNS_CORE_H__
45#define __HIPZ_FNS_CORE_H__
46
47#include "hcp_phyp.h"
48#include "hipz_hw.h"
49
50#define hipz_galpa_store_cq(gal, offset, value) \
51 hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
52
53#define hipz_galpa_load_cq(gal, offset) \
54 hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
55
56#define hipz_galpa_store_qp(gal, offset, value) \
57 hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
58#define hipz_galpa_load_qp(gal, offset) \
59 hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
60
61static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
62{
63 /* ringing doorbell :-) */
64 hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
65 EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
66}
67
68static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
69{
70 /* ringing doorbell :-) */
71 hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
72 EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
73}
74
75static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
76{
77 hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
78 EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
79}
80
81static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
82{
83 u64 cqx_n0_reg;
84
85 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
86 EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
87 value));
88 cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
89}
90
91static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
92{
93 u64 cqx_n1_reg;
94
95 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
96 EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
97 cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
98}
99
100#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
new file mode 100644
index 000000000000..3fc92b031c50
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -0,0 +1,388 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * eHCA register definitions
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 * Reinhard Ernst <rernst@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __HIPZ_HW_H__
44#define __HIPZ_HW_H__
45
46#include "ehca_tools.h"
47
48/* QP Table Entry Memory Map */
49struct hipz_qptemm {
50 u64 qpx_hcr;
51 u64 qpx_c;
52 u64 qpx_herr;
53 u64 qpx_aer;
54/* 0x20*/
55 u64 qpx_sqa;
56 u64 qpx_sqc;
57 u64 qpx_rqa;
58 u64 qpx_rqc;
59/* 0x40*/
60 u64 qpx_st;
61 u64 qpx_pmstate;
62 u64 qpx_pmfa;
63 u64 qpx_pkey;
64/* 0x60*/
65 u64 qpx_pkeya;
66 u64 qpx_pkeyb;
67 u64 qpx_pkeyc;
68 u64 qpx_pkeyd;
69/* 0x80*/
70 u64 qpx_qkey;
71 u64 qpx_dqp;
72 u64 qpx_dlidp;
73 u64 qpx_portp;
74/* 0xa0*/
75 u64 qpx_slidp;
76 u64 qpx_slidpp;
77 u64 qpx_dlida;
78 u64 qpx_porta;
79/* 0xc0*/
80 u64 qpx_slida;
81 u64 qpx_slidpa;
82 u64 qpx_slvl;
83 u64 qpx_ipd;
84/* 0xe0*/
85 u64 qpx_mtu;
86 u64 qpx_lato;
87 u64 qpx_rlimit;
88 u64 qpx_rnrlimit;
89/* 0x100*/
90 u64 qpx_t;
91 u64 qpx_sqhp;
92 u64 qpx_sqptp;
93 u64 qpx_nspsn;
94/* 0x120*/
95 u64 qpx_nspsnhwm;
96 u64 reserved1;
97 u64 qpx_sdsi;
98 u64 qpx_sdsbc;
99/* 0x140*/
100 u64 qpx_sqwsize;
101 u64 qpx_sqwts;
102 u64 qpx_lsn;
103 u64 qpx_nssn;
104/* 0x160 */
105 u64 qpx_mor;
106 u64 qpx_cor;
107 u64 qpx_sqsize;
108 u64 qpx_erc;
109/* 0x180*/
110 u64 qpx_rnrrc;
111 u64 qpx_ernrwt;
112 u64 qpx_rnrresp;
113 u64 qpx_lmsna;
114/* 0x1a0 */
115 u64 qpx_sqhpc;
116 u64 qpx_sqcptp;
117 u64 qpx_sigt;
118 u64 qpx_wqecnt;
119/* 0x1c0*/
120 u64 qpx_rqhp;
121 u64 qpx_rqptp;
122 u64 qpx_rqsize;
123 u64 qpx_nrr;
124/* 0x1e0*/
125 u64 qpx_rdmac;
126 u64 qpx_nrpsn;
127 u64 qpx_lapsn;
128 u64 qpx_lcr;
129/* 0x200*/
130 u64 qpx_rwc;
131 u64 qpx_rwva;
132 u64 qpx_rdsi;
133 u64 qpx_rdsbc;
134/* 0x220*/
135 u64 qpx_rqwsize;
136 u64 qpx_crmsn;
137 u64 qpx_rdd;
138 u64 qpx_larpsn;
139/* 0x240*/
140 u64 qpx_pd;
141 u64 qpx_scqn;
142 u64 qpx_rcqn;
143 u64 qpx_aeqn;
144/* 0x260*/
145 u64 qpx_aaelog;
146 u64 qpx_ram;
147 u64 qpx_rdmaqe0;
148 u64 qpx_rdmaqe1;
149/* 0x280*/
150 u64 qpx_rdmaqe2;
151 u64 qpx_rdmaqe3;
152 u64 qpx_nrpsnhwm;
153/* 0x298*/
154 u64 reserved[(0x400 - 0x298) / 8];
155/* 0x400 extended data */
156 u64 reserved_ext[(0x500 - 0x400) / 8];
157/* 0x500 */
158 u64 reserved2[(0x1000 - 0x500) / 8];
159/* 0x1000 */
160};
161
162#define QPX_SQADDER EHCA_BMASK_IBM(48,63)
163#define QPX_RQADDER EHCA_BMASK_IBM(48,63)
164
165#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
166
167/* MRMWPT Entry Memory Map */
168struct hipz_mrmwmm {
169 /* 0x00 */
170 u64 mrx_hcr;
171
172 u64 mrx_c;
173 u64 mrx_herr;
174 u64 mrx_aer;
175 /* 0x20 */
176 u64 mrx_pp;
177 u64 reserved1;
178 u64 reserved2;
179 u64 reserved3;
180 /* 0x40 */
181 u64 reserved4[(0x200 - 0x40) / 8];
182 /* 0x200 */
183 u64 mrx_ctl[64];
184
185};
186
187#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x)
188
189struct hipz_qpedmm {
190 /* 0x00 */
191 u64 reserved0[(0x400) / 8];
192 /* 0x400 */
193 u64 qpedx_phh;
194 u64 qpedx_ppsgp;
195 /* 0x410 */
196 u64 qpedx_ppsgu;
197 u64 qpedx_ppdgp;
198 /* 0x420 */
199 u64 qpedx_ppdgu;
200 u64 qpedx_aph;
201 /* 0x430 */
202 u64 qpedx_apsgp;
203 u64 qpedx_apsgu;
204 /* 0x440 */
205 u64 qpedx_apdgp;
206 u64 qpedx_apdgu;
207 /* 0x450 */
208 u64 qpedx_apav;
209 u64 qpedx_apsav;
210 /* 0x460 */
211 u64 qpedx_hcr;
212 u64 reserved1[4];
213 /* 0x488 */
214 u64 qpedx_rrl0;
215 /* 0x490 */
216 u64 qpedx_rrrkey0;
217 u64 qpedx_rrva0;
218 /* 0x4a0 */
219 u64 reserved2;
220 u64 qpedx_rrl1;
221 /* 0x4b0 */
222 u64 qpedx_rrrkey1;
223 u64 qpedx_rrva1;
224 /* 0x4c0 */
225 u64 reserved3;
226 u64 qpedx_rrl2;
227 /* 0x4d0 */
228 u64 qpedx_rrrkey2;
229 u64 qpedx_rrva2;
230 /* 0x4e0 */
231 u64 reserved4;
232 u64 qpedx_rrl3;
233 /* 0x4f0 */
234 u64 qpedx_rrrkey3;
235 u64 qpedx_rrva3;
236};
237
238#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x)
239
240/* CQ Table Entry Memory Map */
241struct hipz_cqtemm {
242 u64 cqx_hcr;
243 u64 cqx_c;
244 u64 cqx_herr;
245 u64 cqx_aer;
246/* 0x20 */
247 u64 cqx_ptp;
248 u64 cqx_tp;
249 u64 cqx_fec;
250 u64 cqx_feca;
251/* 0x40 */
252 u64 cqx_ep;
253 u64 cqx_eq;
254/* 0x50 */
255 u64 reserved1;
256 u64 cqx_n0;
257/* 0x60 */
258 u64 cqx_n1;
259 u64 reserved2[(0x1000 - 0x60) / 8];
260/* 0x1000 */
261};
262
263#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32,63)
264#define CQX_FECADDER EHCA_BMASK_IBM(32,63)
265#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0)
266#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0)
267
268#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x)
269
270/* EQ Table Entry Memory Map */
271struct hipz_eqtemm {
272 u64 eqx_hcr;
273 u64 eqx_c;
274
275 u64 eqx_herr;
276 u64 eqx_aer;
277/* 0x20 */
278 u64 eqx_ptp;
279 u64 eqx_tp;
280 u64 eqx_ssba;
281 u64 eqx_psba;
282
283/* 0x40 */
284 u64 eqx_cec;
285 u64 eqx_meql;
286 u64 eqx_xisbi;
287 u64 eqx_xisc;
288/* 0x60 */
289 u64 eqx_it;
290
291};
292
293#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x)
294
295/* access control defines for MR/MW */
296#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000
297#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000
298#define HIPZ_ACCESSCTRL_R_READ 0x00200000
299#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
300#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000
301
302/* query hca response block */
303struct hipz_query_hca {
304 u32 cur_reliable_dg;
305 u32 cur_qp;
306 u32 cur_cq;
307 u32 cur_eq;
308 u32 cur_mr;
309 u32 cur_mw;
310 u32 cur_ee_context;
311 u32 cur_mcast_grp;
312 u32 cur_qp_attached_mcast_grp;
313 u32 reserved1;
314 u32 cur_ipv6_qp;
315 u32 cur_eth_qp;
316 u32 cur_hp_mr;
317 u32 reserved2[3];
318 u32 max_rd_domain;
319 u32 max_qp;
320 u32 max_cq;
321 u32 max_eq;
322 u32 max_mr;
323 u32 max_hp_mr;
324 u32 max_mw;
325 u32 max_mrwpte;
326 u32 max_special_mrwpte;
327 u32 max_rd_ee_context;
328 u32 max_mcast_grp;
329 u32 max_total_mcast_qp_attach;
330 u32 max_mcast_qp_attach;
331 u32 max_raw_ipv6_qp;
332 u32 max_raw_ethy_qp;
333 u32 internal_clock_frequency;
334 u32 max_pd;
335 u32 max_ah;
336 u32 max_cqe;
337 u32 max_wqes_wq;
338 u32 max_partitions;
339 u32 max_rr_ee_context;
340 u32 max_rr_qp;
341 u32 max_rr_hca;
342 u32 max_act_wqs_ee_context;
343 u32 max_act_wqs_qp;
344 u32 max_sge;
345 u32 max_sge_rd;
346 u32 memory_page_size_supported;
347 u64 max_mr_size;
348 u32 local_ca_ack_delay;
349 u32 num_ports;
350 u32 vendor_id;
351 u32 vendor_part_id;
352 u32 hw_ver;
353 u64 node_guid;
354 u64 hca_cap_indicators;
355 u32 data_counter_register_size;
356 u32 max_shared_rq;
357 u32 max_isns_eq;
358 u32 max_neq;
359} __attribute__ ((packed));
360
361/* query port response block */
362struct hipz_query_port {
363 u32 state;
364 u32 bad_pkey_cntr;
365 u32 lmc;
366 u32 lid;
367 u32 subnet_timeout;
368 u32 qkey_viol_cntr;
369 u32 sm_sl;
370 u32 sm_lid;
371 u32 capability_mask;
372 u32 init_type_reply;
373 u32 pkey_tbl_len;
374 u32 gid_tbl_len;
375 u64 gid_prefix;
376 u32 port_nr;
377 u16 pkey_entries[16];
378 u8 reserved1[32];
379 u32 trent_size;
380 u32 trbuf_size;
381 u64 max_msg_sz;
382 u32 max_mtu;
383 u32 vl_cap;
384 u8 reserved2[1900];
385 u64 guid_entries[255];
386} __attribute__ ((packed));
387
388#endif
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
new file mode 100644
index 000000000000..e028ff1588cc
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -0,0 +1,149 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * internal queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
13 * BSD.
14 *
15 * OpenIB BSD License
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are met:
19 *
20 * Redistributions of source code must retain the above copyright notice, this
21 * list of conditions and the following disclaimer.
22 *
23 * Redistributions in binary form must reproduce the above copyright notice,
24 * this list of conditions and the following disclaimer in the documentation
25 * and/or other materials
26 * provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
29 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
36 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#include "ehca_tools.h"
42#include "ipz_pt_fn.h"
43
44void *ipz_qpageit_get_inc(struct ipz_queue *queue)
45{
46 void *ret = ipz_qeit_get(queue);
47 queue->current_q_offset += queue->pagesize;
48 if (queue->current_q_offset > queue->queue_length) {
49 queue->current_q_offset -= queue->pagesize;
50 ret = NULL;
51 }
52 if (((u64)ret) % EHCA_PAGESIZE) {
53 ehca_gen_err("ERROR!! not at PAGE-Boundary");
54 return NULL;
55 }
56 return ret;
57}
58
59void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
60{
61 void *ret = ipz_qeit_get(queue);
62 u64 last_entry_in_q = queue->queue_length - queue->qe_size;
63
64 queue->current_q_offset += queue->qe_size;
65 if (queue->current_q_offset > last_entry_in_q) {
66 queue->current_q_offset = 0;
67 queue->toggle_state = (~queue->toggle_state) & 1;
68 }
69
70 return ret;
71}
72
73int ipz_queue_ctor(struct ipz_queue *queue,
74 const u32 nr_of_pages,
75 const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
76{
77 int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
78 int f;
79
80 if (pagesize > PAGE_SIZE) {
81 ehca_gen_err("FATAL ERROR: pagesize=%x is greater "
82 "than kernel page size", pagesize);
83 return 0;
84 }
85 if (!pages_per_kpage) {
86 ehca_gen_err("FATAL ERROR: invalid kernel page size. "
87 "pages_per_kpage=%x", pages_per_kpage);
88 return 0;
89 }
90 queue->queue_length = nr_of_pages * pagesize;
91 queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
92 if (!queue->queue_pages) {
93 ehca_gen_err("ERROR!! didn't get the memory");
94 return 0;
95 }
96 memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
97 /*
98 * allocate pages for queue:
99 * outer loop allocates whole kernel pages (page aligned) and
100 * inner loop divides a kernel page into smaller hca queue pages
101 */
102 f = 0;
103 while (f < nr_of_pages) {
104 u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
105 int k;
106 if (!kpage)
107 goto ipz_queue_ctor_exit0; /*NOMEM*/
108 for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) {
109 (queue->queue_pages)[f] = (struct ipz_page *)kpage;
110 kpage += EHCA_PAGESIZE;
111 f++;
112 }
113 }
114
115 queue->current_q_offset = 0;
116 queue->qe_size = qe_size;
117 queue->act_nr_of_sg = nr_of_sg;
118 queue->pagesize = pagesize;
119 queue->toggle_state = 1;
120 return 1;
121
122 ipz_queue_ctor_exit0:
123 ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x",
124 queue, f, nr_of_pages);
125 for (f = 0; f < nr_of_pages; f += pages_per_kpage) {
126 if (!(queue->queue_pages)[f])
127 break;
128 free_page((unsigned long)(queue->queue_pages)[f]);
129 }
130 return 0;
131}
132
133int ipz_queue_dtor(struct ipz_queue *queue)
134{
135 int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT;
136 int g;
137 int nr_pages;
138
139 if (!queue || !queue->queue_pages) {
140 ehca_gen_dbg("queue or queue_pages is NULL");
141 return 0;
142 }
143 nr_pages = queue->queue_length / queue->pagesize;
144 for (g = 0; g < nr_pages; g += pages_per_kpage)
145 free_page((unsigned long)(queue->queue_pages)[g]);
146 vfree(queue->queue_pages);
147
148 return 1;
149}
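Taken together, ipz_queue_ctor(), ipz_qpageit_get_inc() and ipz_queue_dtor() give the rest of the driver a simple life cycle for a hardware queue: allocate the pages, hand each one to the adapter, then free everything. A minimal usage sketch follows; register_page_with_hca() is a hypothetical helper, and the real callers live in the ehca CQ/QP setup code that is not part of this excerpt:

	struct ipz_queue queue;
	void *page;

	/* 16 HCA pages of EHCA_PAGESIZE each, 64-byte queue entries, no SG */
	if (!ipz_queue_ctor(&queue, 16, EHCA_PAGESIZE, 64, 0))
		return -ENOMEM;			/* ctor returns 0 on failure */

	/* hand every queue page to the adapter; iterator returns NULL at the end */
	while ((page = ipz_qpageit_get_inc(&queue)) != NULL)
		register_page_with_hca(page);	/* hypothetical helper */

	/* ... queue in use ... */

	ipz_queue_dtor(&queue);			/* frees the pages and the page array */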
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
new file mode 100644
index 000000000000..2f13509d5257
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -0,0 +1,247 @@
1/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * internal queue handling
5 *
6 * Authors: Waleri Fomin <fomin@de.ibm.com>
7 * Reinhard Ernst <rernst@de.ibm.com>
8 * Christoph Raisch <raisch@de.ibm.com>
9 *
10 * Copyright (c) 2005 IBM Corporation
11 *
12 * All rights reserved.
13 *
14 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
15 * BSD.
16 *
17 * OpenIB BSD License
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions are met:
21 *
22 * Redistributions of source code must retain the above copyright notice, this
23 * list of conditions and the following disclaimer.
24 *
25 * Redistributions in binary form must reproduce the above copyright notice,
26 * this list of conditions and the following disclaimer in the documentation
27 * and/or other materials
28 * provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43#ifndef __IPZ_PT_FN_H__
44#define __IPZ_PT_FN_H__
45
46#define EHCA_PAGESHIFT 12
47#define EHCA_PAGESIZE 4096UL
48#define EHCA_PAGEMASK (~(EHCA_PAGESIZE-1))
49#define EHCA_PT_ENTRIES 512UL
50
51#include "ehca_tools.h"
52#include "ehca_qes.h"
53
54/* struct generic ehca page */
55struct ipz_page {
56 u8 entries[EHCA_PAGESIZE];
57};
58
59/* struct generic queue in linux kernel virtual memory (kv) */
60struct ipz_queue {
61 u64 current_q_offset; /* current queue entry */
62
63 struct ipz_page **queue_pages; /* array of pages belonging to queue */
64 u32 qe_size; /* queue entry size */
65 u32 act_nr_of_sg;
66 u32 queue_length; /* queue length allocated in bytes */
67 u32 pagesize;
68 u32 toggle_state; /* toggle flag - per page */
69 u32 dummy3; /* 64 bit alignment */
70};
71
72/*
73 * return current Queue Entry for a certain q_offset
74 * returns address (kv) of Queue Entry
75 */
76static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
77{
78 struct ipz_page *current_page;
79 if (q_offset >= queue->queue_length)
80 return NULL;
81 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
82 return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
83}
84
85/*
86 * return current Queue Entry
87 * returns address (kv) of Queue Entry
88 */
89static inline void *ipz_qeit_get(struct ipz_queue *queue)
90{
91 return ipz_qeit_calc(queue, queue->current_q_offset);
92}
93
94/*
95 * return current Queue Page, increment Queue Page iterator from
96 * page to page in struct ipz_queue, last increment will return NULL and
97 * NOT wrap
98 * returns address (kv) of Queue Page
99 * warning don't use in parallel with ipz_qeit_get_inc()
100 */
101void *ipz_qpageit_get_inc(struct ipz_queue *queue);
102
103/*
104 * return current Queue Entry, increment Queue Entry iterator by one
105 * step in struct ipz_queue, will wrap in ringbuffer
106 * returns address (kv) of Queue Entry BEFORE increment
107 * warning don't use in parallel with ipz_qpageit_get_inc()
108 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
109 */
110static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
111{
112 void *ret = ipz_qeit_get(queue);
113 queue->current_q_offset += queue->qe_size;
114 if (queue->current_q_offset >= queue->queue_length) {
115 queue->current_q_offset = 0;
116 /* toggle the valid flag */
117 queue->toggle_state = (~queue->toggle_state) & 1;
118 }
119
120 return ret;
121}
122
123/*
124 * return current Queue Entry, increment Queue Entry iterator by one
125 * step in struct ipz_queue, will wrap in ringbuffer
126 * returns address (kv) of Queue Entry BEFORE increment
127 * returns 0 and does not increment, if wrong valid state
128 * warning don't use in parallel with ipz_qpageit_get_inc()
129 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
130 */
131static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
132{
133 struct ehca_cqe *cqe = ipz_qeit_get(queue);
134 u32 cqe_flags = cqe->cqe_flags;
135
136 if ((cqe_flags >> 7) != (queue->toggle_state & 1))
137 return NULL;
138
139 ipz_qeit_get_inc(queue);
140 return cqe;
141}
142
143/*
144 * returns and resets Queue Entry iterator
145 * returns address (kv) of first Queue Entry
146 */
147static inline void *ipz_qeit_reset(struct ipz_queue *queue)
148{
149 queue->current_q_offset = 0;
150 return ipz_qeit_get(queue);
151}
152
153/* struct generic page table */
154struct ipz_pt {
155 u64 entries[EHCA_PT_ENTRIES];
156};
157
158/* struct page table for a queue, only to be used in pf */
159struct ipz_qpt {
160 /* queue page tables (kv), use u64 because we know the element length */
161 u64 *qpts;
162 u32 n_qpts;
163 u32 n_ptes; /* number of page table entries */
164 u64 *current_pte_addr;
165};
166
167/*
168 * constructor for an ipz_queue_t, placement new for ipz_queue_t,
169 * new for all dependent data structures
170 * all QP Tables are the same
171 * flow:
172 * allocate+pin queue
173 * see ipz_qpt_ctor()
174 * returns true if ok, false if out of memory
175 */
176int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
177 const u32 pagesize, const u32 qe_size,
178 const u32 nr_of_sg);
179
180/*
181 * destructor for an ipz_queue_t
182 * -# free queue
183 * see ipz_queue_ctor()
184 * returns true if ok, false if queue was NULL-ptr or free failed
185 */
186int ipz_queue_dtor(struct ipz_queue *queue);
187
188/*
189 * constructor for an ipz_qpt_t,
190 * placement new for struct ipz_queue, new for all dependent datastructors
191 * all QP Tables are the same,
192 * flow:
193 * -# allocate+pin queue
194 * -# initialise ptcb
195 * -# allocate+pin PTs
196 * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
197 * -# the ring must have room for exactly nr_of_PTEs
198 * see ipz_qpt_ctor()
199 */
200void ipz_qpt_ctor(struct ipz_qpt *qpt,
201 const u32 nr_of_qes,
202 const u32 pagesize,
203 const u32 qe_size,
204 const u8 lowbyte, const u8 toggle,
205 u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
206
207/*
208 * return current Queue Entry, increment Queue Entry iterator by one
209 * step in struct ipz_queue, will wrap in ringbuffer
210 * returns address (kv) of Queue Entry BEFORE increment
211 * warning don't use in parallel with ipz_qpageit_get_inc()
212 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
213 * fix EQ page problems
214 */
215void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
216
217/*
218 * return current Event Queue Entry, increment Queue Entry iterator
219 * by one step in struct ipz_queue if valid, will wrap in ringbuffer
220 * returns address (kv) of Queue Entry BEFORE increment
221 * returns 0 and does not increment, if wrong valid state
222 * warning don't use in parallel with ipz_qpageit_get_inc()
223 * warning unpredictable results may occur if steps>act_nr_of_queue_entries
224 */
225static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
226{
227 void *ret = ipz_qeit_get(queue);
228 u32 qe = *(u8 *) ret;
229 if ((qe >> 7) != (queue->toggle_state & 1))
230 return NULL;
231 ipz_qeit_eq_get_inc(queue); /* this is a good one */
232 return ret;
233}
234
235/* returns address (GX) of first queue entry */
236static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
237{
238 return be64_to_cpu(qpt->qpts[0]);
239}
240
241/* returns address (kv) of first page of queue page table */
242static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
243{
244 return qpt->qpts;
245}
246
247#endif /* __IPZ_PT_FN_H__ */
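The *_valid iterators above rely on a generation ("toggle") scheme rather than an explicit producer index: ipz_qeit_get_inc() flips queue->toggle_state each time it wraps, and an entry counts as new only while the top bit of its flags byte (cqe_flags for CQEs, the first byte for EQ entries) matches the current toggle_state, so entries left over from the previous lap read as invalid without having to be cleared. A minimal consumer sketch, in which handle_completion() and the ipz_queue member name are assumptions since the ehca_cq layout is defined elsewhere in the driver:

	struct ehca_cqe *cqe;

	/* drain every CQE the hardware has published so far */
	while ((cqe = ipz_qeit_get_inc_valid(&my_cq->ipz_queue)) != NULL)
		handle_completion(cqe);		/* hypothetical consumer */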
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 1db9489f1e82..574a678e7fdd 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,16 +1,9 @@
1config IPATH_CORE
2 tristate "QLogic InfiniPath Driver"
3 depends on 64BIT && PCI_MSI && NET
4 ---help---
5 This is a low-level driver for QLogic InfiniPath host channel
6 adapters (HCAs) based on the HT-400 and PE-800 chips.
7
8config INFINIBAND_IPATH 1config INFINIBAND_IPATH
9 tristate "QLogic InfiniPath Verbs Driver" 2 tristate "QLogic InfiniPath Driver"
10 depends on IPATH_CORE && INFINIBAND 3 depends on PCI_MSI && 64BIT && INFINIBAND
11 ---help--- 4 ---help---
12 This is a driver that provides InfiniBand verbs support for 5 This is a driver for QLogic InfiniPath host channel adapters,
13 QLogic InfiniPath host channel adapters (HCAs). This 6 including InfiniBand verbs support. This driver allows these
14 allows these devices to be used with both kernel upper level 7 devices to be used with both kernel upper level protocols such
15 protocols such as IP-over-InfiniBand as well as with userspace 8 as IP-over-InfiniBand as well as with userspace applications
16 applications (in conjunction with InfiniBand userspace access). 9 (in conjunction with InfiniBand userspace access).
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index b0bf72864130..5e29cb0095e5 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -1,36 +1,35 @@
1EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \ 1EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \
2 -DIPATH_KERN_TYPE=0 2 -DIPATH_KERN_TYPE=0
3 3
4obj-$(CONFIG_IPATH_CORE) += ipath_core.o
5obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o 4obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
6 5
7ipath_core-y := \ 6ib_ipath-y := \
7 ipath_cq.o \
8 ipath_diag.o \ 8 ipath_diag.o \
9 ipath_driver.o \ 9 ipath_driver.o \
10 ipath_eeprom.o \ 10 ipath_eeprom.o \
11 ipath_file_ops.o \ 11 ipath_file_ops.o \
12 ipath_fs.o \ 12 ipath_fs.o \
13 ipath_ht400.o \ 13 ipath_iba6110.o \
14 ipath_iba6120.o \
14 ipath_init_chip.o \ 15 ipath_init_chip.o \
15 ipath_intr.o \ 16 ipath_intr.o \
16 ipath_layer.o \
17 ipath_pe800.o \
18 ipath_stats.o \
19 ipath_sysfs.o \
20 ipath_user_pages.o
21
22ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o
23
24ib_ipath-y := \
25 ipath_cq.o \
26 ipath_keys.o \ 17 ipath_keys.o \
18 ipath_layer.o \
27 ipath_mad.o \ 19 ipath_mad.o \
20 ipath_mmap.o \
28 ipath_mr.o \ 21 ipath_mr.o \
29 ipath_qp.o \ 22 ipath_qp.o \
30 ipath_rc.o \ 23 ipath_rc.o \
31 ipath_ruc.o \ 24 ipath_ruc.o \
32 ipath_srq.o \ 25 ipath_srq.o \
26 ipath_stats.o \
27 ipath_sysfs.o \
33 ipath_uc.o \ 28 ipath_uc.o \
34 ipath_ud.o \ 29 ipath_ud.o \
35 ipath_verbs.o \ 30 ipath_user_pages.o \
36 ipath_verbs_mcast.o 31 ipath_verbs_mcast.o \
32 ipath_verbs.o
33
34ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
35ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 062bd392e7e5..f577905e3aca 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -106,9 +106,9 @@ struct infinipath_stats {
106 __u64 sps_ether_spkts; 106 __u64 sps_ether_spkts;
107 /* number of "ethernet" packets received by driver */ 107 /* number of "ethernet" packets received by driver */
108 __u64 sps_ether_rpkts; 108 __u64 sps_ether_rpkts;
109 /* number of SMA packets sent by driver */ 109 /* number of SMA packets sent by driver. Obsolete. */
110 __u64 sps_sma_spkts; 110 __u64 sps_sma_spkts;
111 /* number of SMA packets received by driver */ 111 /* number of SMA packets received by driver. Obsolete. */
112 __u64 sps_sma_rpkts; 112 __u64 sps_sma_rpkts;
113 /* number of times all ports rcvhdrq was full and packet dropped */ 113 /* number of times all ports rcvhdrq was full and packet dropped */
114 __u64 sps_hdrqfull; 114 __u64 sps_hdrqfull;
@@ -138,7 +138,7 @@ struct infinipath_stats {
138 __u64 sps_pageunlocks; 138 __u64 sps_pageunlocks;
139 /* 139 /*
140 * Number of packets dropped in kernel other than errors (ether 140 * Number of packets dropped in kernel other than errors (ether
141 * packets if ipath not configured, sma/mad, etc.) 141 * packets if ipath not configured, etc.)
142 */ 142 */
143 __u64 sps_krdrops; 143 __u64 sps_krdrops;
144 /* pad for future growth */ 144 /* pad for future growth */
@@ -153,8 +153,6 @@ struct infinipath_stats {
153#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */ 153#define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */
154/* Device has been disabled via admin request */ 154/* Device has been disabled via admin request */
155#define IPATH_STATUS_ADMIN_DISABLED 0x4 155#define IPATH_STATUS_ADMIN_DISABLED 0x4
156#define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */
157#define IPATH_STATUS_SMA 0x10 /* user SMA running */
158/* Chip has been found and initted */ 156/* Chip has been found and initted */
159#define IPATH_STATUS_CHIP_PRESENT 0x20 157#define IPATH_STATUS_CHIP_PRESENT 0x20
160/* IB link is at ACTIVE, usable for data traffic */ 158/* IB link is at ACTIVE, usable for data traffic */
@@ -465,12 +463,11 @@ struct __ipath_sendpkt {
465 struct ipath_iovec sps_iov[4]; 463 struct ipath_iovec sps_iov[4];
466}; 464};
467 465
468/* Passed into SMA special file's ->read and ->write methods. */ 466/* Passed into diag data special file's ->write method. */
469struct ipath_sma_pkt 467struct ipath_diag_pkt {
470{ 468 __u32 unit;
471 __u32 unit; /* unit on which to send packet */ 469 __u64 data;
472 __u64 data; /* address of payload in userspace */ 470 __u32 len;
473 __u32 len; /* length of payload */
474}; 471};
475 472
476/* 473/*
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 3efee341c9bc..049221bc590e 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -42,20 +42,28 @@
42 * @entry: work completion entry to add 42 * @entry: work completion entry to add
43 * @sig: true if @entry is a solicited entry 43 * @sig: true if @entry is a solicited entry
44 * 44 *
45 * This may be called with one of the qp->s_lock or qp->r_rq.lock held. 45 * This may be called with qp->s_lock held.
46 */ 46 */
47void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) 47void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
48{ 48{
49 struct ipath_cq_wc *wc = cq->queue;
49 unsigned long flags; 50 unsigned long flags;
51 u32 head;
50 u32 next; 52 u32 next;
51 53
52 spin_lock_irqsave(&cq->lock, flags); 54 spin_lock_irqsave(&cq->lock, flags);
53 55
54 if (cq->head == cq->ibcq.cqe) 56 /*
57 * Note that the head pointer might be writable by user processes.
58 * Take care to verify it is a sane value.
59 */
60 head = wc->head;
61 if (head >= (unsigned) cq->ibcq.cqe) {
62 head = cq->ibcq.cqe;
55 next = 0; 63 next = 0;
56 else 64 } else
57 next = cq->head + 1; 65 next = head + 1;
58 if (unlikely(next == cq->tail)) { 66 if (unlikely(next == wc->tail)) {
59 spin_unlock_irqrestore(&cq->lock, flags); 67 spin_unlock_irqrestore(&cq->lock, flags);
60 if (cq->ibcq.event_handler) { 68 if (cq->ibcq.event_handler) {
61 struct ib_event ev; 69 struct ib_event ev;
@@ -67,8 +75,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
67 } 75 }
68 return; 76 return;
69 } 77 }
70 cq->queue[cq->head] = *entry; 78 wc->queue[head] = *entry;
71 cq->head = next; 79 wc->head = next;
72 80
73 if (cq->notify == IB_CQ_NEXT_COMP || 81 if (cq->notify == IB_CQ_NEXT_COMP ||
74 (cq->notify == IB_CQ_SOLICITED && solicited)) { 82 (cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -101,19 +109,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
101int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) 109int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
102{ 110{
103 struct ipath_cq *cq = to_icq(ibcq); 111 struct ipath_cq *cq = to_icq(ibcq);
112 struct ipath_cq_wc *wc = cq->queue;
104 unsigned long flags; 113 unsigned long flags;
105 int npolled; 114 int npolled;
106 115
107 spin_lock_irqsave(&cq->lock, flags); 116 spin_lock_irqsave(&cq->lock, flags);
108 117
109 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { 118 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
110 if (cq->tail == cq->head) 119 if (wc->tail == wc->head)
111 break; 120 break;
112 *entry = cq->queue[cq->tail]; 121 *entry = wc->queue[wc->tail];
113 if (cq->tail == cq->ibcq.cqe) 122 if (wc->tail >= cq->ibcq.cqe)
114 cq->tail = 0; 123 wc->tail = 0;
115 else 124 else
116 cq->tail++; 125 wc->tail++;
117 } 126 }
118 127
119 spin_unlock_irqrestore(&cq->lock, flags); 128 spin_unlock_irqrestore(&cq->lock, flags);
@@ -160,38 +169,74 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
160{ 169{
161 struct ipath_ibdev *dev = to_idev(ibdev); 170 struct ipath_ibdev *dev = to_idev(ibdev);
162 struct ipath_cq *cq; 171 struct ipath_cq *cq;
163 struct ib_wc *wc; 172 struct ipath_cq_wc *wc;
164 struct ib_cq *ret; 173 struct ib_cq *ret;
165 174
166 if (entries > ib_ipath_max_cqes) { 175 if (entries < 1 || entries > ib_ipath_max_cqes) {
167 ret = ERR_PTR(-EINVAL); 176 ret = ERR_PTR(-EINVAL);
168 goto bail; 177 goto done;
169 } 178 }
170 179
171 if (dev->n_cqs_allocated == ib_ipath_max_cqs) { 180 if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
172 ret = ERR_PTR(-ENOMEM); 181 ret = ERR_PTR(-ENOMEM);
173 goto bail; 182 goto done;
174 } 183 }
175 184
176 /* 185 /* Allocate the completion queue structure. */
177 * Need to use vmalloc() if we want to support large #s of
178 * entries.
179 */
180 cq = kmalloc(sizeof(*cq), GFP_KERNEL); 186 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
181 if (!cq) { 187 if (!cq) {
182 ret = ERR_PTR(-ENOMEM); 188 ret = ERR_PTR(-ENOMEM);
183 goto bail; 189 goto done;
184 } 190 }
185 191
186 /* 192 /*
187 * Need to use vmalloc() if we want to support large #s of entries. 193 * Allocate the completion queue entries and head/tail pointers.
194 * This is allocated separately so that it can be resized and
195 * also mapped into user space.
196 * We need to use vmalloc() in order to support mmap and large
197 * numbers of entries.
188 */ 198 */
189 wc = vmalloc(sizeof(*wc) * (entries + 1)); 199 wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
190 if (!wc) { 200 if (!wc) {
191 kfree(cq);
192 ret = ERR_PTR(-ENOMEM); 201 ret = ERR_PTR(-ENOMEM);
193 goto bail; 202 goto bail_cq;
194 } 203 }
204
205 /*
206 * Return the address of the WC as the offset to mmap.
207 * See ipath_mmap() for details.
208 */
209 if (udata && udata->outlen >= sizeof(__u64)) {
210 struct ipath_mmap_info *ip;
211 __u64 offset = (__u64) wc;
212 int err;
213
214 err = ib_copy_to_udata(udata, &offset, sizeof(offset));
215 if (err) {
216 ret = ERR_PTR(err);
217 goto bail_wc;
218 }
219
220 /* Allocate info for ipath_mmap(). */
221 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
222 if (!ip) {
223 ret = ERR_PTR(-ENOMEM);
224 goto bail_wc;
225 }
226 cq->ip = ip;
227 ip->context = context;
228 ip->obj = wc;
229 kref_init(&ip->ref);
230 ip->mmap_cnt = 0;
231 ip->size = PAGE_ALIGN(sizeof(*wc) +
232 sizeof(struct ib_wc) * entries);
233 spin_lock_irq(&dev->pending_lock);
234 ip->next = dev->pending_mmaps;
235 dev->pending_mmaps = ip;
236 spin_unlock_irq(&dev->pending_lock);
237 } else
238 cq->ip = NULL;
239
195 /* 240 /*
196 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. 241 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
197 * The number of entries should be >= the number requested or return 242 * The number of entries should be >= the number requested or return
@@ -202,15 +247,22 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
202 cq->triggered = 0; 247 cq->triggered = 0;
203 spin_lock_init(&cq->lock); 248 spin_lock_init(&cq->lock);
204 tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); 249 tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
205 cq->head = 0; 250 wc->head = 0;
206 cq->tail = 0; 251 wc->tail = 0;
207 cq->queue = wc; 252 cq->queue = wc;
208 253
209 ret = &cq->ibcq; 254 ret = &cq->ibcq;
210 255
211 dev->n_cqs_allocated++; 256 dev->n_cqs_allocated++;
257 goto done;
212 258
213bail: 259bail_wc:
260 vfree(wc);
261
262bail_cq:
263 kfree(cq);
264
265done:
214 return ret; 266 return ret;
215} 267}
216 268
@@ -229,7 +281,10 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
229 281
230 tasklet_kill(&cq->comptask); 282 tasklet_kill(&cq->comptask);
231 dev->n_cqs_allocated--; 283 dev->n_cqs_allocated--;
232 vfree(cq->queue); 284 if (cq->ip)
285 kref_put(&cq->ip->ref, ipath_release_mmap_info);
286 else
287 vfree(cq->queue);
233 kfree(cq); 288 kfree(cq);
234 289
235 return 0; 290 return 0;
@@ -253,7 +308,7 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
253 spin_lock_irqsave(&cq->lock, flags); 308 spin_lock_irqsave(&cq->lock, flags);
254 /* 309 /*
255 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow 310 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
256 * any other transitions. 311 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
257 */ 312 */
258 if (cq->notify != IB_CQ_NEXT_COMP) 313 if (cq->notify != IB_CQ_NEXT_COMP)
259 cq->notify = notify; 314 cq->notify = notify;
@@ -264,46 +319,86 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
264int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) 319int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
265{ 320{
266 struct ipath_cq *cq = to_icq(ibcq); 321 struct ipath_cq *cq = to_icq(ibcq);
267 struct ib_wc *wc, *old_wc; 322 struct ipath_cq_wc *old_wc = cq->queue;
268 u32 n; 323 struct ipath_cq_wc *wc;
324 u32 head, tail, n;
269 int ret; 325 int ret;
270 326
327 if (cqe < 1 || cqe > ib_ipath_max_cqes) {
328 ret = -EINVAL;
329 goto bail;
330 }
331
271 /* 332 /*
272 * Need to use vmalloc() if we want to support large #s of entries. 333 * Need to use vmalloc() if we want to support large #s of entries.
273 */ 334 */
274 wc = vmalloc(sizeof(*wc) * (cqe + 1)); 335 wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
275 if (!wc) { 336 if (!wc) {
276 ret = -ENOMEM; 337 ret = -ENOMEM;
277 goto bail; 338 goto bail;
278 } 339 }
279 340
341 /*
342 * Return the address of the WC as the offset to mmap.
343 * See ipath_mmap() for details.
344 */
345 if (udata && udata->outlen >= sizeof(__u64)) {
346 __u64 offset = (__u64) wc;
347
348 ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
349 if (ret)
350 goto bail;
351 }
352
280 spin_lock_irq(&cq->lock); 353 spin_lock_irq(&cq->lock);
281 if (cq->head < cq->tail) 354 /*
282 n = cq->ibcq.cqe + 1 + cq->head - cq->tail; 355 * Make sure head and tail are sane since they
356 * might be user writable.
357 */
358 head = old_wc->head;
359 if (head > (u32) cq->ibcq.cqe)
360 head = (u32) cq->ibcq.cqe;
361 tail = old_wc->tail;
362 if (tail > (u32) cq->ibcq.cqe)
363 tail = (u32) cq->ibcq.cqe;
364 if (head < tail)
365 n = cq->ibcq.cqe + 1 + head - tail;
283 else 366 else
284 n = cq->head - cq->tail; 367 n = head - tail;
285 if (unlikely((u32)cqe < n)) { 368 if (unlikely((u32)cqe < n)) {
286 spin_unlock_irq(&cq->lock); 369 spin_unlock_irq(&cq->lock);
287 vfree(wc); 370 vfree(wc);
288 ret = -EOVERFLOW; 371 ret = -EOVERFLOW;
289 goto bail; 372 goto bail;
290 } 373 }
291 for (n = 0; cq->tail != cq->head; n++) { 374 for (n = 0; tail != head; n++) {
292 wc[n] = cq->queue[cq->tail]; 375 wc->queue[n] = old_wc->queue[tail];
293 if (cq->tail == cq->ibcq.cqe) 376 if (tail == (u32) cq->ibcq.cqe)
294 cq->tail = 0; 377 tail = 0;
295 else 378 else
296 cq->tail++; 379 tail++;
297 } 380 }
298 cq->ibcq.cqe = cqe; 381 cq->ibcq.cqe = cqe;
299 cq->head = n; 382 wc->head = n;
300 cq->tail = 0; 383 wc->tail = 0;
301 old_wc = cq->queue;
302 cq->queue = wc; 384 cq->queue = wc;
303 spin_unlock_irq(&cq->lock); 385 spin_unlock_irq(&cq->lock);
304 386
305 vfree(old_wc); 387 vfree(old_wc);
306 388
389 if (cq->ip) {
390 struct ipath_ibdev *dev = to_idev(ibcq->device);
391 struct ipath_mmap_info *ip = cq->ip;
392
393 ip->obj = wc;
394 ip->size = PAGE_ALIGN(sizeof(*wc) +
395 sizeof(struct ib_wc) * cqe);
396 spin_lock_irq(&dev->pending_lock);
397 ip->next = dev->pending_mmaps;
398 dev->pending_mmaps = ip;
399 spin_unlock_irq(&dev->pending_lock);
400 }
401
307 ret = 0; 402 ret = 0;
308 403
309bail: 404bail:
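The point of this hunk is that the completion ring, together with its head and tail indices, now lives in one vmalloc_user() region so ipath_mmap() can expose it to user space; that is also why both indices are range-checked before use, since a user mapping could overwrite them. The definition of struct ipath_cq_wc is in ipath_verbs.h and not shown here, but a layout consistent with the sizeof(*wc) + sizeof(struct ib_wc) * entries allocations above would be roughly:

	/* sketch only -- the real definition lives in ipath_verbs.h */
	struct ipath_cq_wc {
		u32 head;		/* index of next entry to fill */
		u32 tail;		/* index of next entry to consume */
		struct ib_wc queue[1];	/* ring of ibcq.cqe + 1 entries follows */
	};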
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index f415beda0d32..df69f0d80b8b 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -60,7 +60,6 @@
60#define __IPATH_USER_SEND 0x1000 /* use user mode send */ 60#define __IPATH_USER_SEND 0x1000 /* use user mode send */
61#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ 61#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
62#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ 62#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
63#define __IPATH_SMADBG 0x8000 /* sma packet debug */
64#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ 63#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
65#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ 64#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
66#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ 65#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
@@ -84,7 +83,6 @@
84/* print mmap/nopage stuff, not using VDBG any more */ 83/* print mmap/nopage stuff, not using VDBG any more */
85#define __IPATH_MMDBG 0x0 84#define __IPATH_MMDBG 0x0
86#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ 85#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
87#define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */
88#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ 86#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
89#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ 87#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
90#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ 88#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 147dd89e21c9..29958b6e0214 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -41,11 +41,12 @@
41 * through the /sys/bus/pci resource mmap interface. 41 * through the /sys/bus/pci resource mmap interface.
42 */ 42 */
43 43
44#include <linux/io.h>
44#include <linux/pci.h> 45#include <linux/pci.h>
46#include <linux/vmalloc.h>
45#include <asm/uaccess.h> 47#include <asm/uaccess.h>
46 48
47#include "ipath_kernel.h" 49#include "ipath_kernel.h"
48#include "ipath_layer.h"
49#include "ipath_common.h" 50#include "ipath_common.h"
50 51
51int ipath_diag_inuse; 52int ipath_diag_inuse;
@@ -274,6 +275,158 @@ bail:
274 return ret; 275 return ret;
275} 276}
276 277
278static ssize_t ipath_diagpkt_write(struct file *fp,
279 const char __user *data,
280 size_t count, loff_t *off);
281
282static struct file_operations diagpkt_file_ops = {
283 .owner = THIS_MODULE,
284 .write = ipath_diagpkt_write,
285};
286
287static struct cdev *diagpkt_cdev;
288static struct class_device *diagpkt_class_dev;
289
290int __init ipath_diagpkt_add(void)
291{
292 return ipath_cdev_init(IPATH_DIAGPKT_MINOR,
293 "ipath_diagpkt", &diagpkt_file_ops,
294 &diagpkt_cdev, &diagpkt_class_dev);
295}
296
297void __exit ipath_diagpkt_remove(void)
298{
299 ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev);
300}
301
302/**
303 * ipath_diagpkt_write - write an IB packet
304 * @fp: the diag data device file pointer
305 * @data: ipath_diag_pkt structure saying where to get the packet
306 * @count: size of data to write
307 * @off: unused by this code
308 */
309static ssize_t ipath_diagpkt_write(struct file *fp,
310 const char __user *data,
311 size_t count, loff_t *off)
312{
313 u32 __iomem *piobuf;
314 u32 plen, clen, pbufn;
315 struct ipath_diag_pkt dp;
316 u32 *tmpbuf = NULL;
317 struct ipath_devdata *dd;
318 ssize_t ret = 0;
319 u64 val;
320
321 if (count < sizeof(dp)) {
322 ret = -EINVAL;
323 goto bail;
324 }
325
326 if (copy_from_user(&dp, data, sizeof(dp))) {
327 ret = -EFAULT;
328 goto bail;
329 }
330
331 /* send count must be an exact number of dwords */
332 if (dp.len & 3) {
333 ret = -EINVAL;
334 goto bail;
335 }
336
337 clen = dp.len >> 2;
338
339 dd = ipath_lookup(dp.unit);
340 if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
341 !dd->ipath_kregbase) {
342 ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
343 dp.unit);
344 ret = -ENODEV;
345 goto bail;
346 }
347
348 if (ipath_diag_inuse && !diag_set_link &&
349 !(dd->ipath_flags & IPATH_LINKACTIVE)) {
350 diag_set_link = 1;
351 ipath_cdbg(VERBOSE, "Trying to set link active for "
352 "diag pkt\n");
353 ipath_set_linkstate(dd, IPATH_IB_LINKARM);
354 ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
355 }
356
357 if (!(dd->ipath_flags & IPATH_INITTED)) {
358 /* no hardware, freeze, etc. */
359 ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
360 ret = -ENODEV;
361 goto bail;
362 }
363 val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
364 if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM &&
365 val != IPATH_IBSTATE_ACTIVE) {
366 ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
367 dd->ipath_unit, (unsigned long long) val);
368 ret = -EINVAL;
369 goto bail;
370 }
371
372 /* need total length before first word written */
373 /* +1 word is for the qword padding */
374 plen = sizeof(u32) + dp.len;
375
376 if ((plen + 4) > dd->ipath_ibmaxlen) {
377 ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
378 plen - 4, dd->ipath_ibmaxlen);
379 ret = -EINVAL;
380 goto bail; /* before writing pbc */
381 }
382 tmpbuf = vmalloc(plen);
383 if (!tmpbuf) {
384 dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
385 "failing\n");
386 ret = -ENOMEM;
387 goto bail;
388 }
389
390 if (copy_from_user(tmpbuf,
391 (const void __user *) (unsigned long) dp.data,
392 dp.len)) {
393 ret = -EFAULT;
394 goto bail;
395 }
396
397 piobuf = ipath_getpiobuf(dd, &pbufn);
398 if (!piobuf) {
399 ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
400 dd->ipath_unit);
401 ret = -EBUSY;
402 goto bail;
403 }
404
405 plen >>= 2; /* in dwords */
406
407 if (ipath_debug & __IPATH_PKTDBG)
408 ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
409 dd->ipath_unit, plen - 1, pbufn);
410
411 /* we have to flush after the PBC for correctness on some cpus
412 * or WC buffer can be written out of order */
413 writeq(plen, piobuf);
414 ipath_flush_wc();
415 /* copy all but the trigger word, then flush, so it's written
416 * to chip before trigger word, then write trigger word, then
417 * flush again, so packet is sent. */
418 __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
419 ipath_flush_wc();
420 __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
421 ipath_flush_wc();
422
423 ret = sizeof(dp);
424
425bail:
426 vfree(tmpbuf);
427 return ret;
428}
429
277static int ipath_diag_release(struct inode *in, struct file *fp) 430static int ipath_diag_release(struct inode *in, struct file *fp)
278{ 431{
279 mutex_lock(&ipath_mutex); 432 mutex_lock(&ipath_mutex);
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index f98518d912b5..2108466c7e33 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -39,7 +39,7 @@
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40 40
41#include "ipath_kernel.h" 41#include "ipath_kernel.h"
42#include "ipath_layer.h" 42#include "ipath_verbs.h"
43#include "ipath_common.h" 43#include "ipath_common.h"
44 44
45static void ipath_update_pio_bufs(struct ipath_devdata *); 45static void ipath_update_pio_bufs(struct ipath_devdata *);
@@ -51,8 +51,6 @@ const char *ipath_get_unit_name(int unit)
51 return iname; 51 return iname;
52} 52}
53 53
54EXPORT_SYMBOL_GPL(ipath_get_unit_name);
55
56#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: " 54#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
57#define PFX IPATH_DRV_NAME ": " 55#define PFX IPATH_DRV_NAME ": "
58 56
@@ -60,13 +58,13 @@ EXPORT_SYMBOL_GPL(ipath_get_unit_name);
60 * The size has to be longer than this string, so we can append 58 * The size has to be longer than this string, so we can append
61 * board/chip information to it in the init code. 59 * board/chip information to it in the init code.
62 */ 60 */
63const char ipath_core_version[] = IPATH_IDSTR "\n"; 61const char ib_ipath_version[] = IPATH_IDSTR "\n";
64 62
65static struct idr unit_table; 63static struct idr unit_table;
66DEFINE_SPINLOCK(ipath_devs_lock); 64DEFINE_SPINLOCK(ipath_devs_lock);
67LIST_HEAD(ipath_dev_list); 65LIST_HEAD(ipath_dev_list);
68 66
69wait_queue_head_t ipath_sma_state_wait; 67wait_queue_head_t ipath_state_wait;
70 68
71unsigned ipath_debug = __IPATH_INFO; 69unsigned ipath_debug = __IPATH_INFO;
72 70
@@ -403,10 +401,10 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
403 /* setup the chip-specific functions, as early as possible. */ 401 /* setup the chip-specific functions, as early as possible. */
404 switch (ent->device) { 402 switch (ent->device) {
405 case PCI_DEVICE_ID_INFINIPATH_HT: 403 case PCI_DEVICE_ID_INFINIPATH_HT:
406 ipath_init_ht400_funcs(dd); 404 ipath_init_iba6110_funcs(dd);
407 break; 405 break;
408 case PCI_DEVICE_ID_INFINIPATH_PE800: 406 case PCI_DEVICE_ID_INFINIPATH_PE800:
409 ipath_init_pe800_funcs(dd); 407 ipath_init_iba6120_funcs(dd);
410 break; 408 break;
411 default: 409 default:
412 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " 410 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
@@ -440,7 +438,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
440 } 438 }
441 dd->ipath_pcirev = rev; 439 dd->ipath_pcirev = rev;
442 440
441#if defined(__powerpc__)
442 /* There isn't a generic way to specify writethrough mappings */
443 dd->ipath_kregbase = __ioremap(addr, len,
444 (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
445#else
443 dd->ipath_kregbase = ioremap_nocache(addr, len); 446 dd->ipath_kregbase = ioremap_nocache(addr, len);
447#endif
444 448
445 if (!dd->ipath_kregbase) { 449 if (!dd->ipath_kregbase) {
446 ipath_dbg("Unable to map io addr %llx to kvirt, failing\n", 450 ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
@@ -503,7 +507,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
503 ipathfs_add_device(dd); 507 ipathfs_add_device(dd);
504 ipath_user_add(dd); 508 ipath_user_add(dd);
505 ipath_diag_add(dd); 509 ipath_diag_add(dd);
506 ipath_layer_add(dd); 510 ipath_register_ib_device(dd);
507 511
508 goto bail; 512 goto bail;
509 513
@@ -532,7 +536,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
532 return; 536 return;
533 537
534 dd = pci_get_drvdata(pdev); 538 dd = pci_get_drvdata(pdev);
535 ipath_layer_remove(dd); 539 ipath_unregister_ib_device(dd->verbs_dev);
536 ipath_diag_remove(dd); 540 ipath_diag_remove(dd);
537 ipath_user_remove(dd); 541 ipath_user_remove(dd);
538 ipathfs_remove_device(dd); 542 ipathfs_remove_device(dd);
@@ -607,21 +611,23 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
607 * 611 *
608 * wait up to msecs milliseconds for IB link state change to occur for 612 * wait up to msecs milliseconds for IB link state change to occur for
609 * now, take the easy polling route. Currently used only by 613 * now, take the easy polling route. Currently used only by
610 * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise 614 * ipath_set_linkstate. Returns 0 if state reached, otherwise
611 * -ETIMEDOUT state can have multiple states set, for any of several 615 * -ETIMEDOUT state can have multiple states set, for any of several
612 * transitions. 616 * transitions.
613 */ 617 */
614int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs) 618static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
619 int msecs)
615{ 620{
616 dd->ipath_sma_state_wanted = state; 621 dd->ipath_state_wanted = state;
617 wait_event_interruptible_timeout(ipath_sma_state_wait, 622 wait_event_interruptible_timeout(ipath_state_wait,
618 (dd->ipath_flags & state), 623 (dd->ipath_flags & state),
619 msecs_to_jiffies(msecs)); 624 msecs_to_jiffies(msecs));
620 dd->ipath_sma_state_wanted = 0; 625 dd->ipath_state_wanted = 0;
621 626
622 if (!(dd->ipath_flags & state)) { 627 if (!(dd->ipath_flags & state)) {
623 u64 val; 628 u64 val;
624 ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n", 629 ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
630 " ms\n",
625 /* test INIT ahead of DOWN, both can be set */ 631 /* test INIT ahead of DOWN, both can be set */
626 (state & IPATH_LINKINIT) ? "INIT" : 632 (state & IPATH_LINKINIT) ? "INIT" :
627 ((state & IPATH_LINKDOWN) ? "DOWN" : 633 ((state & IPATH_LINKDOWN) ? "DOWN" :
@@ -807,58 +813,6 @@ bail:
807 return skb; 813 return skb;
808} 814}
809 815
810/**
811 * ipath_rcv_layer - receive a packet for the layered (ethernet) driver
812 * @dd: the infinipath device
813 * @etail: the sk_buff number
814 * @tlen: the total packet length
815 * @hdr: the ethernet header
816 *
817 * Separate routine for better overall optimization
818 */
819static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
820 u32 tlen, struct ether_header *hdr)
821{
822 u32 elen;
823 u8 pad, *bthbytes;
824 struct sk_buff *skb, *nskb;
825
826 if (dd->ipath_port0_skbs &&
827 hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) {
828 /*
829 * Allocate a new sk_buff to replace the one we give
830 * to the network stack.
831 */
832 nskb = ipath_alloc_skb(dd, GFP_ATOMIC);
833 if (!nskb) {
834 /* count OK packets that we drop */
835 ipath_stats.sps_krdrops++;
836 return;
837 }
838
839 bthbytes = (u8 *) hdr->bth;
840 pad = (bthbytes[1] >> 4) & 3;
841 /* +CRC32 */
842 elen = tlen - (sizeof(*hdr) + pad + sizeof(u32));
843
844 skb = dd->ipath_port0_skbs[etail];
845 dd->ipath_port0_skbs[etail] = nskb;
846 skb_put(skb, elen);
847
848 dd->ipath_f_put_tid(dd, etail + (u64 __iomem *)
849 ((char __iomem *) dd->ipath_kregbase
850 + dd->ipath_rcvegrbase), 0,
851 virt_to_phys(nskb->data));
852
853 __ipath_layer_rcv(dd, hdr, skb);
854
855 /* another ether packet received */
856 ipath_stats.sps_ether_rpkts++;
857 }
858 else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP)
859 __ipath_layer_rcv_lid(dd, hdr);
860}
861
862static void ipath_rcv_hdrerr(struct ipath_devdata *dd, 816static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
863 u32 eflags, 817 u32 eflags,
864 u32 l, 818 u32 l,
@@ -972,26 +926,17 @@ reloop:
972 if (unlikely(eflags)) 926 if (unlikely(eflags))
973 ipath_rcv_hdrerr(dd, eflags, l, etail, rc); 927 ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
974 else if (etype == RCVHQ_RCV_TYPE_NON_KD) { 928 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
975 int ret = __ipath_verbs_rcv(dd, rc + 1, 929 ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
976 ebuf, tlen); 930 if (dd->ipath_lli_counter)
977 if (ret == -ENODEV) 931 dd->ipath_lli_counter--;
978 ipath_cdbg(VERBOSE, 932 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
979 "received IB packet, " 933 "qp=%x), len %x; ignored\n",
980 "not SMA (QP=%x)\n", qp); 934 etype, bthbytes[0], qp, tlen);
981 if (dd->ipath_lli_counter)
982 dd->ipath_lli_counter--;
983
984 } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
985 if (qp == IPATH_KD_QP &&
986 bthbytes[0] == ipath_layer_rcv_opcode &&
987 ebuf)
988 ipath_rcv_layer(dd, etail, tlen,
989 (struct ether_header *)hdr);
990 else
991 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
992 "qp=%x), len %x; ignored\n",
993 etype, bthbytes[0], qp, tlen);
994 } 935 }
936 else if (etype == RCVHQ_RCV_TYPE_EAGER)
937 ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
938 "qp=%x), len %x; ignored\n",
939 etype, bthbytes[0], qp, tlen);
995 else if (etype == RCVHQ_RCV_TYPE_EXPECTED) 940 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
996 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", 941 ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
997 be32_to_cpu(hdr->bth[0]) & 0xff); 942 be32_to_cpu(hdr->bth[0]) & 0xff);
@@ -1024,7 +969,8 @@ reloop:
1024 */ 969 */
1025 if (l == hdrqtail || (i && !(i&0xf))) { 970 if (l == hdrqtail || (i && !(i&0xf))) {
1026 u64 lval; 971 u64 lval;
1027 if (l == hdrqtail) /* PE-800 interrupt only on last */ 972 if (l == hdrqtail)
973 /* request IBA6120 interrupt only on last */
1028 lval = dd->ipath_rhdrhead_intr_off | l; 974 lval = dd->ipath_rhdrhead_intr_off | l;
1029 else 975 else
1030 lval = l; 976 lval = l;
@@ -1038,7 +984,7 @@ reloop:
1038 } 984 }
1039 985
1040 if (!dd->ipath_rhdrhead_intr_off && !reloop) { 986 if (!dd->ipath_rhdrhead_intr_off && !reloop) {
1041 /* HT-400 workaround; we can have a race clearing chip 987 /* IBA6110 workaround; we can have a race clearing chip
1042 * interrupt with another interrupt about to be delivered, 988 * interrupt with another interrupt about to be delivered,
1043 * and can clear it before it is delivered on the GPIO 989 * and can clear it before it is delivered on the GPIO
1044 * workaround. By doing the extra check here for the 990 * workaround. By doing the extra check here for the
@@ -1211,7 +1157,7 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
1211 * 1157 *
1212 * do appropriate marking as busy, etc. 1158 * do appropriate marking as busy, etc.
1213 * returns buffer number if one found (>=0), negative number is error. 1159 * returns buffer number if one found (>=0), negative number is error.
1214 * Used by ipath_sma_send_pkt and ipath_layer_send 1160 * Used by ipath_layer_send
1215 */ 1161 */
1216u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum) 1162u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum)
1217{ 1163{
@@ -1317,13 +1263,6 @@ rescan:
1317 goto bail; 1263 goto bail;
1318 } 1264 }
1319 1265
1320 if (updated)
1321 /*
1322 * ran out of bufs, now some (at least this one we just
1323 * got) are now available, so tell the layered driver.
1324 */
1325 __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
1326
1327 /* 1266 /*
1328 * set next starting place. Since it's just an optimization, 1267 * set next starting place. Since it's just an optimization,
1329 * it doesn't matter who wins on this, so no locking 1268 * it doesn't matter who wins on this, so no locking
@@ -1500,7 +1439,7 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1500 return ret; 1439 return ret;
1501} 1440}
1502 1441
1503void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) 1442static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1504{ 1443{
1505 static const char *what[4] = { 1444 static const char *what[4] = {
1506 [0] = "DOWN", 1445 [0] = "DOWN",
@@ -1511,7 +1450,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1511 int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) & 1450 int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
1512 INFINIPATH_IBCC_LINKCMD_MASK; 1451 INFINIPATH_IBCC_LINKCMD_MASK;
1513 1452
1514 ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate " 1453 ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
1515 "is %s\n", dd->ipath_unit, 1454 "is %s\n", dd->ipath_unit,
1516 what[linkcmd], 1455 what[linkcmd],
1517 ipath_ibcstatus_str[ 1456 ipath_ibcstatus_str[
@@ -1520,7 +1459,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1520 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & 1459 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1521 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); 1460 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
1522 /* flush all queued sends when going to DOWN or INIT, to be sure that 1461 /* flush all queued sends when going to DOWN or INIT, to be sure that
1523 * they don't block SMA and other MAD packets */ 1462 * they don't block MAD packets */
1524 if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) { 1463 if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
1525 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1464 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1526 INFINIPATH_S_ABORT); 1465 INFINIPATH_S_ABORT);
@@ -1534,6 +1473,180 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
1534 dd->ipath_ibcctrl | which); 1473 dd->ipath_ibcctrl | which);
1535} 1474}
1536 1475
1476int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1477{
1478 u32 lstate;
1479 int ret;
1480
1481 switch (newstate) {
1482 case IPATH_IB_LINKDOWN:
1483 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
1484 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1485 /* don't wait */
1486 ret = 0;
1487 goto bail;
1488
1489 case IPATH_IB_LINKDOWN_SLEEP:
1490 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
1491 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1492 /* don't wait */
1493 ret = 0;
1494 goto bail;
1495
1496 case IPATH_IB_LINKDOWN_DISABLE:
1497 ipath_set_ib_lstate(dd,
1498 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1499 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1500 /* don't wait */
1501 ret = 0;
1502 goto bail;
1503
1504 case IPATH_IB_LINKINIT:
1505 if (dd->ipath_flags & IPATH_LINKINIT) {
1506 ret = 0;
1507 goto bail;
1508 }
1509 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
1510 INFINIPATH_IBCC_LINKCMD_SHIFT);
1511 lstate = IPATH_LINKINIT;
1512 break;
1513
1514 case IPATH_IB_LINKARM:
1515 if (dd->ipath_flags & IPATH_LINKARMED) {
1516 ret = 0;
1517 goto bail;
1518 }
1519 if (!(dd->ipath_flags &
1520 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
1521 ret = -EINVAL;
1522 goto bail;
1523 }
1524 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
1525 INFINIPATH_IBCC_LINKCMD_SHIFT);
1526 /*
1527 * Since the port can transition to ACTIVE by receiving
1528 * a non VL 15 packet, wait for either state.
1529 */
1530 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
1531 break;
1532
1533 case IPATH_IB_LINKACTIVE:
1534 if (dd->ipath_flags & IPATH_LINKACTIVE) {
1535 ret = 0;
1536 goto bail;
1537 }
1538 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
1539 ret = -EINVAL;
1540 goto bail;
1541 }
1542 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
1543 INFINIPATH_IBCC_LINKCMD_SHIFT);
1544 lstate = IPATH_LINKACTIVE;
1545 break;
1546
1547 default:
1548 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
1549 ret = -EINVAL;
1550 goto bail;
1551 }
1552 ret = ipath_wait_linkstate(dd, lstate, 2000);
1553
1554bail:
1555 return ret;
1556}
1557
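
The ipath_set_linkstate() body added above boils down to two things: a transition check against dd->ipath_flags (ARM is only legal from INIT or ACTIVE, ACTIVE only from ARMED), and a link command shifted into the IBC control register followed by a 2000 ms ipath_wait_linkstate(). A minimal userspace sketch of just the transition checks, using placeholder flag values rather than the real IPATH_LINK* bits from ipath_kernel.h:

    #include <stdio.h>

    /* placeholder flag bits standing in for the driver's IPATH_LINK* flags */
    #define LINKINIT   0x1u
    #define LINKARMED  0x2u
    #define LINKACTIVE 0x4u

    /*
     * Mirror the checks in ipath_set_linkstate(): ARM needs INIT or ACTIVE,
     * ACTIVE needs ARMED; anything else is -EINVAL (-22 here).
     */
    static int check_arm(unsigned flags)
    {
            if (flags & LINKARMED)
                    return 0;                        /* already there */
            return (flags & (LINKINIT | LINKACTIVE)) ? 0 : -22;
    }

    static int check_active(unsigned flags)
    {
            if (flags & LINKACTIVE)
                    return 0;
            return (flags & LINKARMED) ? 0 : -22;
    }

    int main(void)
    {
            printf("ARM from INIT:    %d\n", check_arm(LINKINIT));    /* 0 */
            printf("ACTIVE from INIT: %d\n", check_active(LINKINIT)); /* -22 */
            return 0;
    }
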
1558/**
1559 * ipath_set_mtu - set the MTU
1560 * @dd: the infinipath device
1561 * @arg: the new MTU
1562 *
1563 * we can handle "any" incoming size, the issue here is whether we
1564 * need to restrict our outgoing size. For now, we don't do any
1565 * sanity checking on this, and we don't deal with what happens to
1566 * programs that are already running when the size changes.
1567 * NOTE: changing the MTU will usually cause the IBC to go back to
1568 * link initialize (IPATH_IBSTATE_INIT) state...
1569 */
1570int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
1571{
1572 u32 piosize;
1573 int changed = 0;
1574 int ret;
1575
1576 /*
1577 * mtu is IB data payload max. It's the largest power of 2 less
1578 * than piosize (or even larger, since it only really controls the
1579 * largest we can receive; we can send the max of the mtu and
1580 * piosize). We check that it's one of the valid IB sizes.
1581 */
1582 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
1583 arg != 4096) {
1584 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
1585 ret = -EINVAL;
1586 goto bail;
1587 }
1588 if (dd->ipath_ibmtu == arg) {
1589 ret = 0; /* same as current */
1590 goto bail;
1591 }
1592
1593 piosize = dd->ipath_ibmaxlen;
1594 dd->ipath_ibmtu = arg;
1595
1596 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
1597 /* Only if it's not the initial value (or reset to it) */
1598 if (piosize != dd->ipath_init_ibmaxlen) {
1599 dd->ipath_ibmaxlen = piosize;
1600 changed = 1;
1601 }
1602 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
1603 piosize = arg + IPATH_PIO_MAXIBHDR;
1604 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
1605 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
1606 arg);
1607 dd->ipath_ibmaxlen = piosize;
1608 changed = 1;
1609 }
1610
1611 if (changed) {
1612 /*
1613 * set the IBC maxpktlength to the size of our pio
1614 * buffers in words
1615 */
1616 u64 ibc = dd->ipath_ibcctrl;
1617 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
1618 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
1619
1620 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
1621 dd->ipath_ibmaxlen = piosize;
1622 piosize /= sizeof(u32); /* in words */
1623 /*
1624 * for ICRC, which we only send in diag test pkt mode, and
1625 * we don't need to worry about that for mtu
1626 */
1627 piosize += 1;
1628
1629 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1630 dd->ipath_ibcctrl = ibc;
1631 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1632 dd->ipath_ibcctrl);
1633 dd->ipath_f_tidtemplate(dd);
1634 }
1635
1636 ret = 0;
1637
1638bail:
1639 return ret;
1640}
1641
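
ipath_set_mtu() above converts the accepted MTU into the IBC maximum packet length in 32-bit words: take the new ipath_ibmaxlen in bytes, drop the two-dword PBC, divide by four, and add one dword for the ICRC. A small sketch of that arithmetic; the 128-byte IPATH_PIO_MAXIBHDR allowance used here is an assumed value for illustration only:

    #include <stdio.h>

    #define PIO_MAXIBHDR 128u   /* assumed header allowance, for illustration */

    /*
     * Dwords programmed into the IBC MAXPKTLEN field for a given byte limit:
     * the 2 dwords of PBC are not counted; 1 dword of ICRC is added.
     */
    static unsigned ibc_maxpktlen_words(unsigned ibmaxlen_bytes)
    {
            return (ibmaxlen_bytes - 2 * 4) / 4 + 1;
    }

    int main(void)
    {
            unsigned mtu = 2048;
            unsigned ibmaxlen = mtu + PIO_MAXIBHDR;  /* as in ipath_set_mtu() */

            printf("mtu %u -> ibmaxlen %u bytes -> %u dwords\n",
                   mtu, ibmaxlen, ibc_maxpktlen_words(ibmaxlen));
            return 0;
    }
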
1642int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
1643{
1644 dd->ipath_lid = arg;
1645 dd->ipath_lmc = lmc;
1646
1647 return 0;
1648}
1649
1537/** 1650/**
1538 * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register 1651 * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
1539 * @dd: the infinipath device 1652 * @dd: the infinipath device
@@ -1637,13 +1750,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
1637 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << 1750 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1638 INFINIPATH_IBCC_LINKINITCMD_SHIFT); 1751 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1639 1752
1640 /*
1641 * we are shutting down, so tell the layered driver. We don't do
1642 * this on just a link state change, much like ethernet, a cable
1643 * unplug, etc. doesn't change driver state
1644 */
1645 ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN);
1646
1647 /* disable IBC */ 1753 /* disable IBC */
1648 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; 1754 dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
1649 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 1755 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
@@ -1743,7 +1849,7 @@ static int __init infinipath_init(void)
1743{ 1849{
1744 int ret; 1850 int ret;
1745 1851
1746 ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version); 1852 ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
1747 1853
1748 /* 1854 /*
1749 * These must be called before the driver is registered with 1855 * These must be called before the driver is registered with
@@ -1776,8 +1882,18 @@ static int __init infinipath_init(void)
1776 goto bail_group; 1882 goto bail_group;
1777 } 1883 }
1778 1884
1885 ret = ipath_diagpkt_add();
1886 if (ret < 0) {
1887 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
1888 "diag data device: error %d\n", -ret);
1889 goto bail_ipathfs;
1890 }
1891
1779 goto bail; 1892 goto bail;
1780 1893
1894bail_ipathfs:
1895 ipath_exit_ipathfs();
1896
1781bail_group: 1897bail_group:
1782 ipath_driver_remove_group(&ipath_driver.driver); 1898 ipath_driver_remove_group(&ipath_driver.driver);
1783 1899
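
The new ipath_diagpkt_add() call slots into the usual goto-unwind error path: a failure jumps to a label that tears down only what earlier steps set up, in reverse order (bail_ipathfs runs ipath_exit_ipathfs() before falling through to bail_group). A generic sketch of the same shape, with hypothetical step_a/step_b/undo_a names:

    #include <stdio.h>

    /* hypothetical setup steps standing in for ipathfs / diagpkt init */
    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; }   /* simulate a failure */
    static void undo_a(void) { puts("undo a"); }

    static int init_example(void)
    {
            int ret;

            ret = step_a();
            if (ret < 0)
                    goto bail;

            ret = step_b();
            if (ret < 0)
                    goto bail_a;      /* unwind only what step_a set up */

            return 0;

    bail_a:
            undo_a();
    bail:
            return ret;
    }

    int main(void)
    {
            printf("init_example() = %d\n", init_example());
            return 0;
    }
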
@@ -1888,6 +2004,8 @@ static void __exit infinipath_cleanup(void)
1888 struct ipath_devdata *dd, *tmp; 2004 struct ipath_devdata *dd, *tmp;
1889 unsigned long flags; 2005 unsigned long flags;
1890 2006
2007 ipath_diagpkt_remove();
2008
1891 ipath_exit_ipathfs(); 2009 ipath_exit_ipathfs();
1892 2010
1893 ipath_driver_remove_group(&ipath_driver.driver); 2011 ipath_driver_remove_group(&ipath_driver.driver);
@@ -1998,5 +2116,22 @@ bail:
1998 return ret; 2116 return ret;
1999} 2117}
2000 2118
2119int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2120{
2121 u64 val;
2122 if ( new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK ) {
2123 return -1;
2124 }
2125 if ( dd->ipath_rx_pol_inv != new_pol_inv ) {
2126 dd->ipath_rx_pol_inv = new_pol_inv;
2127 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
2128 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
2129 INFINIPATH_XGXS_RX_POL_SHIFT);
2130 val |= ((u64)dd->ipath_rx_pol_inv) <<
2131 INFINIPATH_XGXS_RX_POL_SHIFT;
2132 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
2133 }
2134 return 0;
2135}
2001module_init(infinipath_init); 2136module_init(infinipath_init);
2002module_exit(infinipath_cleanup); 2137module_exit(infinipath_cleanup);
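
ipath_set_rx_pol_inv() added above is a read-modify-write of one field in the 64-bit XGXS configuration register: clear the RX polarity bits, OR in the new inversion mask, write the register back, and skip the write when the value is unchanged. The field update itself, as a pure function with placeholder shift/mask values standing in for INFINIPATH_XGXS_RX_POL_SHIFT/MASK:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_POL_SHIFT 4u     /* placeholder; real value in ipath_registers.h */
    #define RX_POL_MASK  0xfu

    /* return the register image with the RX polarity field replaced */
    static uint64_t set_rx_pol(uint64_t xgxs, uint8_t pol_inv)
    {
            xgxs &= ~((uint64_t)RX_POL_MASK << RX_POL_SHIFT);
            xgxs |= (uint64_t)(pol_inv & RX_POL_MASK) << RX_POL_SHIFT;
            return xgxs;
    }

    int main(void)
    {
            uint64_t reg = 0xabcdef00ULL;

            printf("0x%llx -> 0x%llx\n",
                   (unsigned long long)reg,
                   (unsigned long long)set_rx_pol(reg, 0x5));
            return 0;
    }
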
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index bbaa70e57db1..29930e22318e 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -39,7 +39,6 @@
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40 40
41#include "ipath_kernel.h" 41#include "ipath_kernel.h"
42#include "ipath_layer.h"
43#include "ipath_common.h" 42#include "ipath_common.h"
44 43
45static int ipath_open(struct inode *, struct file *); 44static int ipath_open(struct inode *, struct file *);
@@ -985,15 +984,17 @@ static int mmap_piobufs(struct vm_area_struct *vma,
985 * write combining behavior we want on the PIO buffers! 984 * write combining behavior we want on the PIO buffers!
986 */ 985 */
987 986
988 if (vma->vm_flags & VM_READ) { 987#if defined(__powerpc__)
989 dev_info(&dd->pcidev->dev, 988 /* There isn't a generic way to specify writethrough mappings */
990 "Can't map piobufs as readable (flags=%lx)\n", 989 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
991 vma->vm_flags); 990 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
992 ret = -EPERM; 991 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
993 goto bail; 992#endif
994 }
995 993
996 /* don't allow them to later change to readable with mprotect */ 994 /*
995 * don't allow them to later change to readable with mprotect (for when
996 * not initially mapped readable, as is normally the case)
997 */
997 vma->vm_flags &= ~VM_MAYREAD; 998 vma->vm_flags &= ~VM_MAYREAD;
998 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 999 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
999 1000
@@ -1109,7 +1110,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1109 ret = mmap_rcvegrbufs(vma, pd); 1110 ret = mmap_rcvegrbufs(vma, pd);
1110 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) { 1111 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
1111 /* 1112 /*
1112 * The rcvhdrq itself; readonly except on HT-400 (so have 1113 * The rcvhdrq itself; readonly except on HT (so have
1113 * to allow writable mapping), multiple pages, contiguous 1114 * to allow writable mapping), multiple pages, contiguous
1114 * from an i/o perspective. 1115 * from an i/o perspective.
1115 */ 1116 */
@@ -1149,6 +1150,7 @@ static unsigned int ipath_poll(struct file *fp,
1149 struct ipath_portdata *pd; 1150 struct ipath_portdata *pd;
1150 u32 head, tail; 1151 u32 head, tail;
1151 int bit; 1152 int bit;
1153 unsigned pollflag = 0;
1152 struct ipath_devdata *dd; 1154 struct ipath_devdata *dd;
1153 1155
1154 pd = port_fp(fp); 1156 pd = port_fp(fp);
@@ -1185,9 +1187,12 @@ static unsigned int ipath_poll(struct file *fp,
1185 clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); 1187 clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
1186 pd->port_rcvwait_to++; 1188 pd->port_rcvwait_to++;
1187 } 1189 }
1190 else
1191 pollflag = POLLIN | POLLRDNORM;
1188 } 1192 }
1189 else { 1193 else {
1190 /* it's already happened; don't do wait_event overhead */ 1194 /* it's already happened; don't do wait_event overhead */
1195 pollflag = POLLIN | POLLRDNORM;
1191 pd->port_rcvnowait++; 1196 pd->port_rcvnowait++;
1192 } 1197 }
1193 1198
@@ -1195,7 +1200,7 @@ static unsigned int ipath_poll(struct file *fp,
1195 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1200 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1196 dd->ipath_rcvctrl); 1201 dd->ipath_rcvctrl);
1197 1202
1198 return 0; 1203 return pollflag;
1199} 1204}
1200 1205
1201static int try_alloc_port(struct ipath_devdata *dd, int port, 1206static int try_alloc_port(struct ipath_devdata *dd, int port,
@@ -1297,14 +1302,14 @@ static int find_best_unit(struct file *fp)
1297 * This code is present to allow a knowledgeable person to 1302 * This code is present to allow a knowledgeable person to
1298 * specify the layout of processes to processors before opening 1303 * specify the layout of processes to processors before opening
1299 * this driver, and then we'll assign the process to the "closest" 1304 * this driver, and then we'll assign the process to the "closest"
1300 * HT-400 to that processor (we assume reasonable connectivity, 1305 * InfiniPath chip to that processor (we assume reasonable connectivity,
1301 * for now). This code assumes that if affinity has been set 1306 * for now). This code assumes that if affinity has been set
1302 * before this point, that at most one cpu is set; for now this 1307 * before this point, that at most one cpu is set; for now this
1303 * is reasonable. I check for both cpus_empty() and cpus_full(), 1308 * is reasonable. I check for both cpus_empty() and cpus_full(),
1304 * in case some kernel variant sets none of the bits when no 1309 * in case some kernel variant sets none of the bits when no
1305 * affinity is set. 2.6.11 and 12 kernels have all present 1310 * affinity is set. 2.6.11 and 12 kernels have all present
1306 * cpus set. Some day we'll have to fix it up further to handle 1311 * cpus set. Some day we'll have to fix it up further to handle
1307 * a cpu subset. This algorithm fails for two HT-400's connected 1312 * a cpu subset. This algorithm fails for two HT chips connected
1308 * in tunnel fashion. Eventually this needs real topology 1313 * in tunnel fashion. Eventually this needs real topology
1309 * information. There may be some issues with dual core numbering 1314 * information. There may be some issues with dual core numbering
1310 * as well. This needs more work prior to release. 1315 * as well. This needs more work prior to release.
@@ -1815,7 +1820,7 @@ int ipath_user_add(struct ipath_devdata *dd)
1815 if (ret < 0) { 1820 if (ret < 0) {
1816 ipath_dev_err(dd, "Could not create wildcard " 1821 ipath_dev_err(dd, "Could not create wildcard "
1817 "minor: error %d\n", -ret); 1822 "minor: error %d\n", -ret);
1818 goto bail_sma; 1823 goto bail_user;
1819 } 1824 }
1820 1825
1821 atomic_set(&user_setup, 1); 1826 atomic_set(&user_setup, 1);
@@ -1831,7 +1836,7 @@ int ipath_user_add(struct ipath_devdata *dd)
1831 1836
1832 goto bail; 1837 goto bail;
1833 1838
1834bail_sma: 1839bail_user:
1835 user_cleanup(); 1840 user_cleanup();
1836bail: 1841bail:
1837 return ret; 1842 return ret;
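
The ipath_poll() hunk above fixes the return value: the function used to return 0 unconditionally, so poll()/select() callers never saw readiness; it now returns POLLIN | POLLRDNORM when receive data is already pending (roughly, when the head and tail of the receive header queue differ) or arrives before the port sleeps. A userspace-shaped sketch of the readiness mask:

    #include <poll.h>
    #include <stdio.h>

    #ifndef POLLRDNORM
    #define POLLRDNORM 0x040          /* fallback for strict-ANSI builds */
    #endif

    /*
     * Readiness mask for a receive queue given its head/tail indices,
     * mirroring the pollflag logic added to ipath_poll().
     */
    static unsigned rcvq_poll_mask(unsigned head, unsigned tail)
    {
            return head != tail ? (POLLIN | POLLRDNORM) : 0;
    }

    int main(void)
    {
            printf("empty: 0x%x  pending: 0x%x\n",
                   rcvq_poll_mask(3, 3), rcvq_poll_mask(4, 3));
            return 0;
    }
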
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 0936d8e8d704..a5eb30a06a5c 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -191,8 +191,8 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf,
191 portinfo[4] = (dd->ipath_lid << 16); 191 portinfo[4] = (dd->ipath_lid << 16);
192 192
193 /* 193 /*
194 * Notimpl yet SMLID (should we store this in the driver, in case 194 * Notimpl yet SMLID.
195 * SMA dies?) CapabilityMask is 0, we don't support any of these 195 * CapabilityMask is 0, we don't support any of these
196 * DiagCode is 0; we don't store any diag info for now Notimpl yet 196 * DiagCode is 0; we don't store any diag info for now Notimpl yet
197 * M_KeyLeasePeriod (we don't support M_Key) 197 * M_KeyLeasePeriod (we don't support M_Key)
198 */ 198 */
diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 3db015da6e77..bf2455a6d562 100644
--- a/drivers/infiniband/hw/ipath/ipath_ht400.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -33,7 +33,7 @@
33 33
34/* 34/*
35 * This file contains all of the code that is specific to the InfiniPath 35 * This file contains all of the code that is specific to the InfiniPath
36 * HT-400 chip. 36 * HT chip.
37 */ 37 */
38 38
39#include <linux/pci.h> 39#include <linux/pci.h>
@@ -43,7 +43,7 @@
43#include "ipath_registers.h" 43#include "ipath_registers.h"
44 44
45/* 45/*
46 * This lists the InfiniPath HT400 registers, in the actual chip layout. 46 * This lists the InfiniPath registers, in the actual chip layout.
47 * This structure should never be directly accessed. 47 * This structure should never be directly accessed.
48 * 48 *
49 * The names are in InterCap form because they're taken straight from 49 * The names are in InterCap form because they're taken straight from
@@ -461,8 +461,9 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
461 * times. 461 * times.
462 */ 462 */
463 if (dd->ipath_flags & IPATH_INITTED) { 463 if (dd->ipath_flags & IPATH_INITTED) {
464 ipath_dev_err(dd, "Fatal Error (freeze " 464 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
465 "mode), no longer usable\n"); 465 "mode), no longer usable, SN %.16s\n",
466 dd->ipath_serial);
466 isfatal = 1; 467 isfatal = 1;
467 } 468 }
468 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; 469 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
@@ -537,7 +538,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
537 if (hwerrs & INFINIPATH_HWE_HTCMISCERR7) 538 if (hwerrs & INFINIPATH_HWE_HTCMISCERR7)
538 strlcat(msg, "[HT core Misc7]", msgl); 539 strlcat(msg, "[HT core Misc7]", msgl);
539 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { 540 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
540 strlcat(msg, "[Memory BIST test failed, HT-400 unusable]", 541 strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
541 msgl); 542 msgl);
542 /* ignore from now on, so disable until driver reloaded */ 543 /* ignore from now on, so disable until driver reloaded */
543 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; 544 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
@@ -553,7 +554,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
553 554
554 if (hwerrs & _IPATH_PLL_FAIL) { 555 if (hwerrs & _IPATH_PLL_FAIL) {
555 snprintf(bitsmsg, sizeof bitsmsg, 556 snprintf(bitsmsg, sizeof bitsmsg,
556 "[PLL failed (%llx), HT-400 unusable]", 557 "[PLL failed (%llx), InfiniPath hardware unusable]",
557 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL)); 558 (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
558 strlcat(msg, bitsmsg, msgl); 559 strlcat(msg, bitsmsg, msgl);
559 /* ignore from now on, so disable until driver reloaded */ 560 /* ignore from now on, so disable until driver reloaded */
@@ -610,18 +611,18 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
610 break; 611 break;
611 case 5: 612 case 5:
612 /* 613 /*
613 * HT-460 original production board; two production levels, with 614 * original production board; two production levels, with
614 * different serial number ranges. See ipath_ht_early_init() for 615 * different serial number ranges. See ipath_ht_early_init() for
615 * case where we enable IPATH_GPIO_INTR for later serial # range. 616 * case where we enable IPATH_GPIO_INTR for later serial # range.
616 */ 617 */
617 n = "InfiniPath_HT-460"; 618 n = "InfiniPath_QHT7040";
618 break; 619 break;
619 case 6: 620 case 6:
620 n = "OEM_Board_3"; 621 n = "OEM_Board_3";
621 break; 622 break;
622 case 7: 623 case 7:
623 /* HT-460 small form factor production board */ 624 /* small form factor production board */
624 n = "InfiniPath_HT-465"; 625 n = "InfiniPath_QHT7140";
625 break; 626 break;
626 case 8: 627 case 8:
627 n = "LS/X-1"; 628 n = "LS/X-1";
@@ -633,7 +634,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
633 n = "OEM_Board_2"; 634 n = "OEM_Board_2";
634 break; 635 break;
635 case 11: 636 case 11:
636 n = "InfiniPath_HT-470"; 637 n = "InfiniPath_HT-470"; /* obsoleted */
637 break; 638 break;
638 case 12: 639 case 12:
639 n = "OEM_Board_4"; 640 n = "OEM_Board_4";
@@ -641,7 +642,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
641 default: /* don't know, just print the number */ 642 default: /* don't know, just print the number */
642 ipath_dev_err(dd, "Don't yet know about board " 643 ipath_dev_err(dd, "Don't yet know about board "
643 "with ID %u\n", boardrev); 644 "with ID %u\n", boardrev);
644 snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u", 645 snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
645 boardrev); 646 boardrev);
646 break; 647 break;
647 } 648 }
@@ -650,11 +651,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
650 651
651 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { 652 if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
652 /* 653 /*
653 * This version of the driver only supports the HT-400 654 * This version of the driver only supports Rev 3.2 and 3.3
654 * Rev 3.2
655 */ 655 */
656 ipath_dev_err(dd, 656 ipath_dev_err(dd,
657 "Unsupported HT-400 revision %u.%u!\n", 657 "Unsupported InfiniPath hardware revision %u.%u!\n",
658 dd->ipath_majrev, dd->ipath_minrev); 658 dd->ipath_majrev, dd->ipath_minrev);
659 ret = 1; 659 ret = 1;
660 goto bail; 660 goto bail;
@@ -738,7 +738,7 @@ static void ipath_check_htlink(struct ipath_devdata *dd)
738 738
739static int ipath_setup_ht_reset(struct ipath_devdata *dd) 739static int ipath_setup_ht_reset(struct ipath_devdata *dd)
740{ 740{
741 ipath_dbg("No reset possible for HT-400\n"); 741 ipath_dbg("No reset possible for this InfiniPath hardware\n");
742 return 0; 742 return 0;
743} 743}
744 744
@@ -925,7 +925,7 @@ static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
925 925
926 /* 926 /*
927 * kernels with CONFIG_PCI_MSI set the vector in the irq field of 927 * kernels with CONFIG_PCI_MSI set the vector in the irq field of
928 * struct pci_device, so we use that to program the HT-400 internal 928 * struct pci_device, so we use that to program the internal
929 * interrupt register (not config space) with that value. The BIOS 929 * interrupt register (not config space) with that value. The BIOS
930 * must still have done the basic MSI setup. 930 * must still have done the basic MSI setup.
931 */ 931 */
@@ -1013,7 +1013,7 @@ bail:
1013 * @dd: the infinipath device 1013 * @dd: the infinipath device
1014 * 1014 *
1015 * Called during driver unload. 1015 * Called during driver unload.
1016 * This is currently a nop for the HT-400, not for all chips 1016 * This is currently a nop for the HT chip, not for all chips
1017 */ 1017 */
1018static void ipath_setup_ht_cleanup(struct ipath_devdata *dd) 1018static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
1019{ 1019{
@@ -1290,6 +1290,15 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
1290 val &= ~INFINIPATH_XGXS_RESET; 1290 val &= ~INFINIPATH_XGXS_RESET;
1291 change = 1; 1291 change = 1;
1292 } 1292 }
1293 if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
1294 INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
1295 /* need to compensate for Tx inversion in partner */
1296 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
1297 INFINIPATH_XGXS_RX_POL_SHIFT);
1298 val |= dd->ipath_rx_pol_inv <<
1299 INFINIPATH_XGXS_RX_POL_SHIFT;
1300 change = 1;
1301 }
1293 if (change) 1302 if (change)
1294 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); 1303 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1295 1304
@@ -1470,7 +1479,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1470 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; 1479 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1471 1480
1472 /* 1481 /*
1473 * For HT-400, we allocate a somewhat overly large eager buffer, 1482 * For HT, we allocate a somewhat overly large eager buffer,
1474 * such that we can guarantee that we can receive the largest 1483 * such that we can guarantee that we can receive the largest
1475 * packet that we can send out. To truly support a 4KB MTU, 1484 * packet that we can send out. To truly support a 4KB MTU,
1476 * we need to bump this to a large value. To date, other than 1485 * we need to bump this to a large value. To date, other than
@@ -1531,7 +1540,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1531 if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && 1540 if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
1532 dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { 1541 dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
1533 /* 1542 /*
1534 * Later production HT-460 has same changes as HT-465, so 1543 * Later production QHT7040 has same changes as QHT7140, so
1535 * can use GPIO interrupts. They have serial #'s starting 1544 * can use GPIO interrupts. They have serial #'s starting
1536 * with 128, rather than 112. 1545 * with 128, rather than 112.
1537 */ 1546 */
@@ -1560,13 +1569,13 @@ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
1560} 1569}
1561 1570
1562/** 1571/**
1563 * ipath_init_ht400_funcs - set up the chip-specific function pointers 1572 * ipath_init_iba6110_funcs - set up the chip-specific function pointers
1564 * @dd: the infinipath device 1573 * @dd: the infinipath device
1565 * 1574 *
1566 * This is global, and is called directly at init to set up the 1575 * This is global, and is called directly at init to set up the
1567 * chip-specific function pointers for later use. 1576 * chip-specific function pointers for later use.
1568 */ 1577 */
1569void ipath_init_ht400_funcs(struct ipath_devdata *dd) 1578void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
1570{ 1579{
1571 dd->ipath_f_intrsetup = ipath_ht_intconfig; 1580 dd->ipath_f_intrsetup = ipath_ht_intconfig;
1572 dd->ipath_f_bus = ipath_setup_ht_config; 1581 dd->ipath_f_bus = ipath_setup_ht_config;
diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index b83f66d8262c..d86516d23df6 100644
--- a/drivers/infiniband/hw/ipath/ipath_pe800.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -32,7 +32,7 @@
32 */ 32 */
33/* 33/*
34 * This file contains all of the code that is specific to the 34 * This file contains all of the code that is specific to the
35 * InfiniPath PE-800 chip. 35 * InfiniPath PCIe chip.
36 */ 36 */
37 37
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
@@ -45,9 +45,9 @@
45 45
46/* 46/*
47 * This file contains all the chip-specific register information and 47 * This file contains all the chip-specific register information and
48 * access functions for the QLogic InfiniPath PE800, the PCI-Express chip. 48 * access functions for the QLogic InfiniPath PCI-Express chip.
49 * 49 *
50 * This lists the InfiniPath PE800 registers, in the actual chip layout. 50 * This lists the InfiniPath registers, in the actual chip layout.
51 * This structure should never be directly accessed. 51 * This structure should never be directly accessed.
52 */ 52 */
53struct _infinipath_do_not_use_kernel_regs { 53struct _infinipath_do_not_use_kernel_regs {
@@ -213,7 +213,6 @@ static const struct ipath_kregs ipath_pe_kregs = {
213 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), 213 .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
214 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), 214 .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
215 215
216 /* This group is pe-800-specific; and used only in this file */
217 /* The rcvpktled register controls one of the debug port signals, so 216 /* The rcvpktled register controls one of the debug port signals, so
218 * a packet activity LED can be connected to it. */ 217 * a packet activity LED can be connected to it. */
219 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), 218 .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt),
@@ -364,8 +363,9 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
364 * and we get here multiple times 363 * and we get here multiple times
365 */ 364 */
366 if (dd->ipath_flags & IPATH_INITTED) { 365 if (dd->ipath_flags & IPATH_INITTED) {
367 ipath_dev_err(dd, "Fatal Error (freeze " 366 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
368 "mode), no longer usable\n"); 367 "mode), no longer usable, SN %.16s\n",
368 dd->ipath_serial);
369 isfatal = 1; 369 isfatal = 1;
370 } 370 }
371 /* 371 /*
@@ -388,7 +388,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
388 *msg = '\0'; 388 *msg = '\0';
389 389
390 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { 390 if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
391 strlcat(msg, "[Memory BIST test failed, PE-800 unusable]", 391 strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
392 msgl); 392 msgl);
393 /* ignore from now on, so disable until driver reloaded */ 393 /* ignore from now on, so disable until driver reloaded */
394 *dd->ipath_statusp |= IPATH_STATUS_HWERROR; 394 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
@@ -433,7 +433,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
433 433
434 if (hwerrs & _IPATH_PLL_FAIL) { 434 if (hwerrs & _IPATH_PLL_FAIL) {
435 snprintf(bitsmsg, sizeof bitsmsg, 435 snprintf(bitsmsg, sizeof bitsmsg,
436 "[PLL failed (%llx), PE-800 unusable]", 436 "[PLL failed (%llx), InfiniPath hardware unusable]",
437 (unsigned long long) hwerrs & _IPATH_PLL_FAIL); 437 (unsigned long long) hwerrs & _IPATH_PLL_FAIL);
438 strlcat(msg, bitsmsg, msgl); 438 strlcat(msg, bitsmsg, msgl);
439 /* ignore from now on, so disable until driver reloaded */ 439 /* ignore from now on, so disable until driver reloaded */
@@ -511,22 +511,25 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
511 n = "InfiniPath_Emulation"; 511 n = "InfiniPath_Emulation";
512 break; 512 break;
513 case 1: 513 case 1:
514 n = "InfiniPath_PE-800-Bringup"; 514 n = "InfiniPath_QLE7140-Bringup";
515 break; 515 break;
516 case 2: 516 case 2:
517 n = "InfiniPath_PE-880"; 517 n = "InfiniPath_QLE7140";
518 break; 518 break;
519 case 3: 519 case 3:
520 n = "InfiniPath_PE-850"; 520 n = "InfiniPath_QMI7140";
521 break; 521 break;
522 case 4: 522 case 4:
523 n = "InfiniPath_PE-860"; 523 n = "InfiniPath_QEM7140";
524 break;
525 case 5:
526 n = "InfiniPath_QMH7140";
524 break; 527 break;
525 default: 528 default:
526 ipath_dev_err(dd, 529 ipath_dev_err(dd,
527 "Don't yet know about board with ID %u\n", 530 "Don't yet know about board with ID %u\n",
528 boardrev); 531 boardrev);
529 snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u", 532 snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u",
530 boardrev); 533 boardrev);
531 break; 534 break;
532 } 535 }
@@ -534,7 +537,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
534 snprintf(name, namelen, "%s", n); 537 snprintf(name, namelen, "%s", n);
535 538
536 if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { 539 if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) {
537 ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n", 540 ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n",
538 dd->ipath_majrev, dd->ipath_minrev); 541 dd->ipath_majrev, dd->ipath_minrev);
539 ret = 1; 542 ret = 1;
540 } else 543 } else
@@ -651,6 +654,15 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
651 val &= ~INFINIPATH_XGXS_RESET; 654 val &= ~INFINIPATH_XGXS_RESET;
652 change = 1; 655 change = 1;
653 } 656 }
657 if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
658 INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
659 /* need to compensate for Tx inversion in partner */
660 val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
661 INFINIPATH_XGXS_RX_POL_SHIFT);
662 val |= dd->ipath_rx_pol_inv <<
663 INFINIPATH_XGXS_RX_POL_SHIFT;
664 change = 1;
665 }
654 if (change) 666 if (change)
655 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); 667 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
656 668
@@ -705,7 +717,7 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
705 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 717 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
706} 718}
707 719
708/* this is not yet needed on the PE800, so just return 0. */ 720/* this is not yet needed on this chip, so just return 0. */
709static int ipath_pe_intconfig(struct ipath_devdata *dd) 721static int ipath_pe_intconfig(struct ipath_devdata *dd)
710{ 722{
711 return 0; 723 return 0;
@@ -759,8 +771,8 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
759 * 771 *
760 * This is called during driver unload. 772 * This is called during driver unload.
761 * We do the pci_disable_msi here, not in generic code, because it 773 * We do the pci_disable_msi here, not in generic code, because it
762 * isn't used for the HT-400. If we do end up needing pci_enable_msi 774 * isn't used for the HT chips. If we do end up needing pci_enable_msi
763 * at some point in the future for HT-400, we'll move the call back 775 * at some point in the future for HT, we'll move the call back
764 * into the main init_one code. 776 * into the main init_one code.
765 */ 777 */
766static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) 778static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
@@ -780,10 +792,10 @@ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
780 * late in 2.6.16). 792 * late in 2.6.16).
781 * All that can be done is to edit the kernel source to remove the quirk 793 * All that can be done is to edit the kernel source to remove the quirk
782 * check until that is fixed. 794 * check until that is fixed.
783 * We do not need to call enable_msi() for our HyperTransport chip (HT-400), 795 * We do not need to call enable_msi() for our HyperTransport chip,
784 * even those it uses MSI, and we want to avoid the quirk warning, so 796 * even though it uses MSI, and we want to avoid the quirk warning, so
785 * So we call enable_msi only for the PE-800. If we do end up needing 797 * So we call enable_msi only for PCIe. If we do end up needing
786 * pci_enable_msi at some point in the future for HT-400, we'll move the 798 * pci_enable_msi at some point in the future for HT, we'll move the
787 * call back into the main init_one code. 799 * call back into the main init_one code.
788 * We save the msi lo and hi values, so we can restore them after 800 * We save the msi lo and hi values, so we can restore them after
789 * chip reset (the kernel PCI infrastructure doesn't yet handle that 801 * chip reset (the kernel PCI infrastructure doesn't yet handle that
@@ -971,8 +983,7 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
971 int ret; 983 int ret;
972 984
973 /* Use ERROR so it shows up in logs, etc. */ 985 /* Use ERROR so it shows up in logs, etc. */
974 ipath_dev_err(dd, "Resetting PE-800 unit %u\n", 986 ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
975 dd->ipath_unit);
976 /* keep chip from being accessed in a few places */ 987 /* keep chip from being accessed in a few places */
977 dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); 988 dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
978 val = dd->ipath_control | INFINIPATH_C_RESET; 989 val = dd->ipath_control | INFINIPATH_C_RESET;
@@ -1078,7 +1089,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
1078 * @port: the port 1089 * @port: the port
1079 * 1090 *
1080 * clear all TID entries for a port, expected and eager. 1091 * clear all TID entries for a port, expected and eager.
1081 * Used from ipath_close(). On PE800, TIDs are only 32 bits, 1092 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
1082 * not 64, but they are still on 64 bit boundaries, so tidbase 1093 * not 64, but they are still on 64 bit boundaries, so tidbase
1083 * is declared as u64 * for the pointer math, even though we write 32 bits 1094 * is declared as u64 * for the pointer math, even though we write 32 bits
1084 */ 1095 */
@@ -1148,9 +1159,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
1148 dd->ipath_flags |= IPATH_4BYTE_TID; 1159 dd->ipath_flags |= IPATH_4BYTE_TID;
1149 1160
1150 /* 1161 /*
1151 * For openib, we need to be able to handle an IB header of 96 bytes 1162 * For openfabrics, we need to be able to handle an IB header of
1152 * or 24 dwords. HT-400 has arbitrary sized receive buffers, so we 1163 * 24 dwords. HT chip has arbitrary sized receive buffers, so we
1153 * made them the same size as the PIO buffers. The PE-800 does not 1164 * made them the same size as the PIO buffers. This chip does not
1154 * handle arbitrary size buffers, so we need the header large enough 1165 * handle arbitrary size buffers, so we need the header large enough
1155 * to handle largest IB header, but still have room for a 2KB MTU 1166 * to handle largest IB header, but still have room for a 2KB MTU
1156 * standard IB packet. 1167 * standard IB packet.
@@ -1158,11 +1169,10 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
1158 dd->ipath_rcvhdrentsize = 24; 1169 dd->ipath_rcvhdrentsize = 24;
1159 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; 1170 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1160 1171
1161 /* For HT-400, we allocate a somewhat overly large eager buffer, 1172 /*
1162 * such that we can guarantee that we can receive the largest packet 1173 * To truly support a 4KB MTU (for usermode), we need to
1163 * that we can send out. To truly support a 4KB MTU, we need to 1174 * bump this to a larger value. For now, we use them for
1164 * bump this to a larger value. We'll do this when I get around to 1175 * the kernel only.
1165 * testing 4KB sends on the PE-800, which I have not yet done.
1166 */ 1176 */
1167 dd->ipath_rcvegrbufsize = 2048; 1177 dd->ipath_rcvegrbufsize = 2048;
1168 /* 1178 /*
@@ -1175,9 +1185,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
1175 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; 1185 dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
1176 1186
1177 /* 1187 /*
1178 * For PE-800, we can request a receive interrupt for 1 or 1188 * We can request a receive interrupt for 1 or
1179 * more packets from current offset. For now, we set this 1189 * more packets from current offset. For now, we set this
1180 * up for a single packet, to match the HT-400 behavior. 1190 * up for a single packet.
1181 */ 1191 */
1182 dd->ipath_rhdrhead_intr_off = 1ULL<<32; 1192 dd->ipath_rhdrhead_intr_off = 1ULL<<32;
1183 1193
@@ -1216,13 +1226,13 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase)
1216} 1226}
1217 1227
1218/** 1228/**
1219 * ipath_init_pe800_funcs - set up the chip-specific function pointers 1229 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
1220 * @dd: the infinipath device 1230 * @dd: the infinipath device
1221 * 1231 *
1222 * This is global, and is called directly at init to set up the 1232 * This is global, and is called directly at init to set up the
1223 * chip-specific function pointers for later use. 1233 * chip-specific function pointers for later use.
1224 */ 1234 */
1225void ipath_init_pe800_funcs(struct ipath_devdata *dd) 1235void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
1226{ 1236{
1227 dd->ipath_f_intrsetup = ipath_pe_intconfig; 1237 dd->ipath_f_intrsetup = ipath_pe_intconfig;
1228 dd->ipath_f_bus = ipath_setup_pe_config; 1238 dd->ipath_f_bus = ipath_setup_pe_config;
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 414cdd1d80a6..44669dc2e22d 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -53,8 +53,8 @@ module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
53MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); 53MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
54 54
55/* 55/*
56 * Number of buffers reserved for driver (layered drivers and SMA 56 * Number of buffers reserved for driver (verbs and layered drivers.)
57 * send). Reserved at end of buffer list. Initialized based on 57 * Reserved at end of buffer list. Initialized based on
58 * number of PIO buffers if not set via module interface. 58 * number of PIO buffers if not set via module interface.
59 * The problem with this is that it's global, but we'll use different 59 * The problem with this is that it's global, but we'll use different
60 * numbers for different chip types. So the default value is not 60 * numbers for different chip types. So the default value is not
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
80 * 80 *
81 * Allocate the eager TID buffers and program them into infinipath. 81 * Allocate the eager TID buffers and program them into infinipath.
82 * We use the network layer alloc_skb() allocator to allocate the 82 * We use the network layer alloc_skb() allocator to allocate the
83 * memory, and either use the buffers as is for things like SMA 83 * memory, and either use the buffers as is for things like verbs
84 * packets, or pass the buffers up to the ipath layered driver and 84 * packets, or pass the buffers up to the ipath layered driver and
85 * thence the network layer, replacing them as we do so (see 85 * thence the network layer, replacing them as we do so (see
86 * ipath_rcv_layer()). 86 * ipath_rcv_layer()).
@@ -240,7 +240,11 @@ static int init_chip_first(struct ipath_devdata *dd,
240 "only supports %u\n", ipath_cfgports, 240 "only supports %u\n", ipath_cfgports,
241 dd->ipath_portcnt); 241 dd->ipath_portcnt);
242 } 242 }
243 dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports, 243 /*
244 * Allocate full portcnt array, rather than just cfgports, because
245 * cleanup iterates across all possible ports.
246 */
247 dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt,
244 GFP_KERNEL); 248 GFP_KERNEL);
245 249
246 if (!dd->ipath_pd) { 250 if (!dd->ipath_pd) {
@@ -446,9 +450,9 @@ static void enable_chip(struct ipath_devdata *dd,
446 u32 val; 450 u32 val;
447 int i; 451 int i;
448 452
449 if (!reinit) { 453 if (!reinit)
450 init_waitqueue_head(&ipath_sma_state_wait); 454 init_waitqueue_head(&ipath_state_wait);
451 } 455
452 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 456 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
453 dd->ipath_rcvctrl); 457 dd->ipath_rcvctrl);
454 458
@@ -687,7 +691,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
687 dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) 691 dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
688 / (sizeof(u64) * BITS_PER_BYTE / 2); 692 / (sizeof(u64) * BITS_PER_BYTE / 2);
689 if (ipath_kpiobufs == 0) { 693 if (ipath_kpiobufs == 0) {
690 /* not set by user, or set explictly to default */ 694 /* not set by user (this is default) */
691 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) 695 if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
692 kpiobufs = 32; 696 kpiobufs = 32;
693 else 697 else
@@ -946,6 +950,7 @@ static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
946 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; 950 dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
947 } 951 }
948 952
953 ipath_kpiobufs = val;
949 ret = 0; 954 ret = 0;
950bail: 955bail:
951 spin_unlock_irqrestore(&ipath_devs_lock, flags); 956 spin_unlock_irqrestore(&ipath_devs_lock, flags);
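
The init_chip_first() change above sizes the ipath_pd array by ipath_portcnt (every port the chip supports) rather than ipath_cfgports, because teardown iterates over all possible ports and would otherwise index past the configured subset. The sizing rule, sketched with calloc in place of kzalloc:

    #include <stdio.h>
    #include <stdlib.h>

    struct port { int id; };

    /*
     * Allocate one slot per hardware port, even if fewer are configured,
     * so a cleanup loop over all hardware ports stays in bounds.
     */
    static struct port **alloc_ports(unsigned hw_ports)
    {
            return calloc(hw_ports, sizeof(struct port *));
    }

    int main(void)
    {
            unsigned hw_ports = 9, cfg_ports = 5;
            struct port **pd = alloc_ports(hw_ports);

            if (!pd)
                    return 1;
            printf("allocated %u slots, using %u\n", hw_ports, cfg_ports);
            free(pd);
            return 0;
    }
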
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 280e732660a1..49bf7bb15b04 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -34,7 +34,7 @@
34#include <linux/pci.h> 34#include <linux/pci.h>
35 35
36#include "ipath_kernel.h" 36#include "ipath_kernel.h"
37#include "ipath_layer.h" 37#include "ipath_verbs.h"
38#include "ipath_common.h" 38#include "ipath_common.h"
39 39
40/* These are all rcv-related errors which we want to count for stats */ 40/* These are all rcv-related errors which we want to count for stats */
@@ -201,7 +201,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
201 ib_linkstate(lstate)); 201 ib_linkstate(lstate));
202 } 202 }
203 else 203 else
204 ipath_cdbg(SMA, "Unit %u link state %s, last " 204 ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
205 "was %s\n", dd->ipath_unit, 205 "was %s\n", dd->ipath_unit,
206 ib_linkstate(lstate), 206 ib_linkstate(lstate),
207 ib_linkstate((unsigned) 207 ib_linkstate((unsigned)
@@ -213,7 +213,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
213 if (lstate == IPATH_IBSTATE_INIT || 213 if (lstate == IPATH_IBSTATE_INIT ||
214 lstate == IPATH_IBSTATE_ARM || 214 lstate == IPATH_IBSTATE_ARM ||
215 lstate == IPATH_IBSTATE_ACTIVE) 215 lstate == IPATH_IBSTATE_ACTIVE)
216 ipath_cdbg(SMA, "Unit %u link state down" 216 ipath_cdbg(VERBOSE, "Unit %u link state down"
217 " (state 0x%x), from %s\n", 217 " (state 0x%x), from %s\n",
218 dd->ipath_unit, 218 dd->ipath_unit,
219 (u32)val & IPATH_IBSTATE_MASK, 219 (u32)val & IPATH_IBSTATE_MASK,
@@ -269,7 +269,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
269 INFINIPATH_IBCS_LINKSTATE_MASK) 269 INFINIPATH_IBCS_LINKSTATE_MASK)
270 == INFINIPATH_IBCS_L_STATE_ACTIVE) 270 == INFINIPATH_IBCS_L_STATE_ACTIVE)
271 /* if from up to down be more vocal */ 271 /* if from up to down be more vocal */
272 ipath_cdbg(SMA, 272 ipath_cdbg(VERBOSE,
273 "Unit %u link now down (%s)\n", 273 "Unit %u link now down (%s)\n",
274 dd->ipath_unit, 274 dd->ipath_unit,
275 ipath_ibcstatus_str[ltstate]); 275 ipath_ibcstatus_str[ltstate]);
@@ -289,8 +289,6 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
289 *dd->ipath_statusp |= 289 *dd->ipath_statusp |=
290 IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF; 290 IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
291 dd->ipath_f_setextled(dd, lstate, ltstate); 291 dd->ipath_f_setextled(dd, lstate, ltstate);
292
293 __ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP);
294 } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) { 292 } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
295 /* 293 /*
296 * set INIT and DOWN. Down is checked by most of the other 294 * set INIT and DOWN. Down is checked by most of the other
@@ -598,11 +596,11 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
598 596
599 if (!noprint && *msg) 597 if (!noprint && *msg)
600 ipath_dev_err(dd, "%s error\n", msg); 598 ipath_dev_err(dd, "%s error\n", msg);
601 if (dd->ipath_sma_state_wanted & dd->ipath_flags) { 599 if (dd->ipath_state_wanted & dd->ipath_flags) {
602 ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, " 600 ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
603 "waking\n", dd->ipath_sma_state_wanted, 601 "waking\n", dd->ipath_state_wanted,
604 dd->ipath_flags); 602 dd->ipath_flags);
605 wake_up_interruptible(&ipath_sma_state_wait); 603 wake_up_interruptible(&ipath_state_wait);
606 } 604 }
607 605
608 return chkerrpkts; 606 return chkerrpkts;
@@ -708,11 +706,7 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
708{ 706{
709 int ret; 707 int ret;
710 708
711 ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); 709 ret = ipath_ib_piobufavail(dd->verbs_dev);
712 if (ret > 0)
713 goto set;
714
715 ret = __ipath_verbs_piobufavail(dd);
716 if (ret > 0) 710 if (ret > 0)
717 goto set; 711 goto set;
718 712
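
handle_layer_pioavail() above drops the two-stage layered callback and notifies the verbs device directly via ipath_ib_piobufavail(); a positive return appears to mean the consumer still wants buffers, so the function jumps to re-arm the PIO-buffer-available interrupt. A rough sketch of that single-callback shape, with a hypothetical consumer function:

    #include <stdio.h>

    /* hypothetical consumer: returns >0 if it still wants PIO buffers */
    static int consumer_piobufavail(void *arg)
    {
            int *want = arg;
            return *want;
    }

    /* keep the PIO-available interrupt armed only while a consumer wants more */
    static int handle_pioavail(int (*cb)(void *), void *arg)
    {
            return cb(arg) > 0;   /* 1 = leave the interrupt enabled */
    }

    int main(void)
    {
            int want = 0;

            printf("re-arm interrupt: %d\n",
                   handle_pioavail(consumer_piobufavail, &want));
            return 0;
    }
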
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index e9f374fb641e..a8a56276ff1d 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -132,12 +132,6 @@ struct _ipath_layer {
132 void *l_arg; 132 void *l_arg;
133}; 133};
134 134
135/* Verbs layer interface */
136struct _verbs_layer {
137 void *l_arg;
138 struct timer_list l_timer;
139};
140
141struct ipath_devdata { 135struct ipath_devdata {
142 struct list_head ipath_list; 136 struct list_head ipath_list;
143 137
@@ -198,7 +192,8 @@ struct ipath_devdata {
198 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); 192 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
199 /* fill out chip-specific fields */ 193 /* fill out chip-specific fields */
200 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); 194 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
201 struct _verbs_layer verbs_layer; 195 struct ipath_ibdev *verbs_dev;
196 struct timer_list verbs_timer;
202 /* total dwords sent (summed from counter) */ 197 /* total dwords sent (summed from counter) */
203 u64 ipath_sword; 198 u64 ipath_sword;
204 /* total dwords rcvd (summed from counter) */ 199 /* total dwords rcvd (summed from counter) */
@@ -241,7 +236,7 @@ struct ipath_devdata {
241 u64 ipath_tidtemplate; 236 u64 ipath_tidtemplate;
242 /* value to write to free TIDs */ 237 /* value to write to free TIDs */
243 u64 ipath_tidinvalid; 238 u64 ipath_tidinvalid;
244 /* PE-800 rcv interrupt setup */ 239 /* IBA6120 rcv interrupt setup */
245 u64 ipath_rhdrhead_intr_off; 240 u64 ipath_rhdrhead_intr_off;
246 241
247 /* size of memory at ipath_kregbase */ 242 /* size of memory at ipath_kregbase */
@@ -250,8 +245,8 @@ struct ipath_devdata {
250 u32 ipath_pioavregs; 245 u32 ipath_pioavregs;
251 /* IPATH_POLL, etc. */ 246 /* IPATH_POLL, etc. */
252 u32 ipath_flags; 247 u32 ipath_flags;
253 /* ipath_flags sma is waiting for */ 248 /* ipath_flags driver is waiting for */
254 u32 ipath_sma_state_wanted; 249 u32 ipath_state_wanted;
255 /* last buffer for user use, first buf for kernel use is this 250 /* last buffer for user use, first buf for kernel use is this
256 * index. */ 251 * index. */
257 u32 ipath_lastport_piobuf; 252 u32 ipath_lastport_piobuf;
@@ -311,10 +306,6 @@ struct ipath_devdata {
311 u32 ipath_pcibar0; 306 u32 ipath_pcibar0;
312 /* so we can rewrite it after a chip reset */ 307 /* so we can rewrite it after a chip reset */
313 u32 ipath_pcibar1; 308 u32 ipath_pcibar1;
314 /* sequential tries for SMA send and no bufs */
315 u32 ipath_nosma_bufs;
316 /* duration (seconds) ipath_nosma_bufs set */
317 u32 ipath_nosma_secs;
318 309
319 /* HT/PCI Vendor ID (here for NodeInfo) */ 310 /* HT/PCI Vendor ID (here for NodeInfo) */
320 u16 ipath_vendorid; 311 u16 ipath_vendorid;
@@ -512,6 +503,8 @@ struct ipath_devdata {
512 u8 ipath_pci_cacheline; 503 u8 ipath_pci_cacheline;
513 /* LID mask control */ 504 /* LID mask control */
514 u8 ipath_lmc; 505 u8 ipath_lmc;
506 /* Rx Polarity inversion (compensate for ~tx on partner) */
507 u8 ipath_rx_pol_inv;
515 508
516 /* local link integrity counter */ 509 /* local link integrity counter */
517 u32 ipath_lli_counter; 510 u32 ipath_lli_counter;
@@ -523,18 +516,6 @@ extern struct list_head ipath_dev_list;
523extern spinlock_t ipath_devs_lock; 516extern spinlock_t ipath_devs_lock;
524extern struct ipath_devdata *ipath_lookup(int unit); 517extern struct ipath_devdata *ipath_lookup(int unit);
525 518
526extern u16 ipath_layer_rcv_opcode;
527extern int __ipath_layer_intr(struct ipath_devdata *, u32);
528extern int ipath_layer_intr(struct ipath_devdata *, u32);
529extern int __ipath_layer_rcv(struct ipath_devdata *, void *,
530 struct sk_buff *);
531extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *);
532extern int __ipath_verbs_piobufavail(struct ipath_devdata *);
533extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32);
534
535void ipath_layer_add(struct ipath_devdata *);
536void ipath_layer_remove(struct ipath_devdata *);
537
538int ipath_init_chip(struct ipath_devdata *, int); 519int ipath_init_chip(struct ipath_devdata *, int);
539int ipath_enable_wc(struct ipath_devdata *dd); 520int ipath_enable_wc(struct ipath_devdata *dd);
540void ipath_disable_wc(struct ipath_devdata *dd); 521void ipath_disable_wc(struct ipath_devdata *dd);
@@ -549,9 +530,8 @@ void ipath_cdev_cleanup(struct cdev **cdevp,
549 530
550int ipath_diag_add(struct ipath_devdata *); 531int ipath_diag_add(struct ipath_devdata *);
551void ipath_diag_remove(struct ipath_devdata *); 532void ipath_diag_remove(struct ipath_devdata *);
552void ipath_diag_bringup_link(struct ipath_devdata *);
553 533
554extern wait_queue_head_t ipath_sma_state_wait; 534extern wait_queue_head_t ipath_state_wait;
555 535
556int ipath_user_add(struct ipath_devdata *dd); 536int ipath_user_add(struct ipath_devdata *dd);
557void ipath_user_remove(struct ipath_devdata *dd); 537void ipath_user_remove(struct ipath_devdata *dd);
@@ -582,12 +562,14 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
582 562
583int ipath_parse_ushort(const char *str, unsigned short *valp); 563int ipath_parse_ushort(const char *str, unsigned short *valp);
584 564
585int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
586void ipath_set_ib_lstate(struct ipath_devdata *, int);
587void ipath_kreceive(struct ipath_devdata *); 565void ipath_kreceive(struct ipath_devdata *);
588int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); 566int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
589int ipath_reset_device(int); 567int ipath_reset_device(int);
590void ipath_get_faststats(unsigned long); 568void ipath_get_faststats(unsigned long);
569int ipath_set_linkstate(struct ipath_devdata *, u8);
570int ipath_set_mtu(struct ipath_devdata *, u16);
571int ipath_set_lid(struct ipath_devdata *, u32, u8);
572int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
591 573
592/* for use in system calls, where we want to know device type, etc. */ 574/* for use in system calls, where we want to know device type, etc. */
593#define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data) 575#define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data)
@@ -642,10 +624,8 @@ void ipath_free_data(struct ipath_portdata *dd);
642int ipath_waitfor_mdio_cmdready(struct ipath_devdata *); 624int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
643int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *); 625int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
644u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); 626u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
645/* init PE-800-specific func */ 627void ipath_init_iba6120_funcs(struct ipath_devdata *);
646void ipath_init_pe800_funcs(struct ipath_devdata *); 628void ipath_init_iba6110_funcs(struct ipath_devdata *);
647/* init HT-400-specific func */
648void ipath_init_ht400_funcs(struct ipath_devdata *);
649void ipath_get_eeprom_info(struct ipath_devdata *); 629void ipath_get_eeprom_info(struct ipath_devdata *);
650u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); 630u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
651 631
@@ -801,7 +781,7 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
801 781
802struct device_driver; 782struct device_driver;
803 783
804extern const char ipath_core_version[]; 784extern const char ib_ipath_version[];
805 785
806int ipath_driver_create_group(struct device_driver *); 786int ipath_driver_create_group(struct device_driver *);
807void ipath_driver_remove_group(struct device_driver *); 787void ipath_driver_remove_group(struct device_driver *);
@@ -810,6 +790,9 @@ int ipath_device_create_group(struct device *, struct ipath_devdata *);
810void ipath_device_remove_group(struct device *, struct ipath_devdata *); 790void ipath_device_remove_group(struct device *, struct ipath_devdata *);
811int ipath_expose_reset(struct device *); 791int ipath_expose_reset(struct device *);
812 792
793int ipath_diagpkt_add(void);
794void ipath_diagpkt_remove(void);
795
813int ipath_init_ipathfs(void); 796int ipath_init_ipathfs(void);
814void ipath_exit_ipathfs(void); 797void ipath_exit_ipathfs(void);
815int ipathfs_add_device(struct ipath_devdata *); 798int ipathfs_add_device(struct ipath_devdata *);
@@ -831,10 +814,10 @@ const char *ipath_get_unit_name(int unit);
831 814
832extern struct mutex ipath_mutex; 815extern struct mutex ipath_mutex;
833 816
834#define IPATH_DRV_NAME "ipath_core" 817#define IPATH_DRV_NAME "ib_ipath"
835#define IPATH_MAJOR 233 818#define IPATH_MAJOR 233
836#define IPATH_USER_MINOR_BASE 0 819#define IPATH_USER_MINOR_BASE 0
837#define IPATH_SMA_MINOR 128 820#define IPATH_DIAGPKT_MINOR 127
838#define IPATH_DIAG_MINOR_BASE 129 821#define IPATH_DIAG_MINOR_BASE 129
839#define IPATH_NMINORS 255 822#define IPATH_NMINORS 255
840 823
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index a5ca279370aa..ba1b93226caa 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -34,6 +34,7 @@
34#include <asm/io.h> 34#include <asm/io.h>
35 35
36#include "ipath_verbs.h" 36#include "ipath_verbs.h"
37#include "ipath_kernel.h"
37 38
38/** 39/**
39 * ipath_alloc_lkey - allocate an lkey 40 * ipath_alloc_lkey - allocate an lkey
@@ -60,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
60 r = (r + 1) & (rkt->max - 1); 61 r = (r + 1) & (rkt->max - 1);
61 if (r == n) { 62 if (r == n) {
62 spin_unlock_irqrestore(&rkt->lock, flags); 63 spin_unlock_irqrestore(&rkt->lock, flags);
63 _VERBS_INFO("LKEY table full\n"); 64 ipath_dbg(KERN_INFO "LKEY table full\n");
64 ret = 0; 65 ret = 0;
65 goto bail; 66 goto bail;
66 } 67 }
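
The ipath_alloc_lkey() hunk above only swaps the log macro, but the surrounding loop shows the allocation strategy: r = (r + 1) & (rkt->max - 1) wraps the free-slot search around a table whose size the mask implies is a power of two, and the table is declared full when the scan returns to its starting index. A standalone sketch of that circular scan:

    #include <stdio.h>

    #define TABLE_SIZE 8u     /* must be a power of two, as in the lkey table */

    /* circular scan for an empty slot; returns -1 when the table is full */
    static int find_free_slot(void *table[], unsigned start)
    {
            unsigned r = start & (TABLE_SIZE - 1);
            unsigned n = r;

            do {
                    if (!table[r])
                            return (int)r;
                    r = (r + 1) & (TABLE_SIZE - 1);
            } while (r != n);

            return -1;
    }

    int main(void)
    {
            void *tbl[TABLE_SIZE] = { 0 };
            int dummy;

            tbl[0] = &dummy;
            printf("first free slot starting at 0: %d\n",
                   find_free_slot(tbl, 0));
            return 0;
    }
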
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index b28c6f81c731..e46aa4ed2a7e 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -42,26 +42,20 @@
42 42
43#include "ipath_kernel.h" 43#include "ipath_kernel.h"
44#include "ipath_layer.h" 44#include "ipath_layer.h"
45#include "ipath_verbs.h"
45#include "ipath_common.h" 46#include "ipath_common.h"
46 47
47/* Acquire before ipath_devs_lock. */ 48/* Acquire before ipath_devs_lock. */
48static DEFINE_MUTEX(ipath_layer_mutex); 49static DEFINE_MUTEX(ipath_layer_mutex);
49 50
50static int ipath_verbs_registered;
51
52u16 ipath_layer_rcv_opcode; 51u16 ipath_layer_rcv_opcode;
53 52
54static int (*layer_intr)(void *, u32); 53static int (*layer_intr)(void *, u32);
55static int (*layer_rcv)(void *, void *, struct sk_buff *); 54static int (*layer_rcv)(void *, void *, struct sk_buff *);
56static int (*layer_rcv_lid)(void *, void *); 55static int (*layer_rcv_lid)(void *, void *);
57static int (*verbs_piobufavail)(void *);
58static void (*verbs_rcv)(void *, void *, void *, u32);
59 56
60static void *(*layer_add_one)(int, struct ipath_devdata *); 57static void *(*layer_add_one)(int, struct ipath_devdata *);
61static void (*layer_remove_one)(void *); 58static void (*layer_remove_one)(void *);
62static void *(*verbs_add_one)(int, struct ipath_devdata *);
63static void (*verbs_remove_one)(void *);
64static void (*verbs_timer_cb)(void *);
65 59
66int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg) 60int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
67{ 61{
@@ -107,302 +101,16 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
107 return ret; 101 return ret;
108} 102}
109 103
110int __ipath_verbs_piobufavail(struct ipath_devdata *dd) 104void ipath_layer_lid_changed(struct ipath_devdata *dd)
111{
112 int ret = -ENODEV;
113
114 if (dd->verbs_layer.l_arg && verbs_piobufavail)
115 ret = verbs_piobufavail(dd->verbs_layer.l_arg);
116
117 return ret;
118}
119
120int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf,
121 u32 tlen)
122{
123 int ret = -ENODEV;
124
125 if (dd->verbs_layer.l_arg && verbs_rcv) {
126 verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen);
127 ret = 0;
128 }
129
130 return ret;
131}
132
133int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
134{ 105{
135 u32 lstate;
136 int ret;
137
138 switch (newstate) {
139 case IPATH_IB_LINKDOWN:
140 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
141 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
142 /* don't wait */
143 ret = 0;
144 goto bail;
145
146 case IPATH_IB_LINKDOWN_SLEEP:
147 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
148 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
149 /* don't wait */
150 ret = 0;
151 goto bail;
152
153 case IPATH_IB_LINKDOWN_DISABLE:
154 ipath_set_ib_lstate(dd,
155 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
156 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
157 /* don't wait */
158 ret = 0;
159 goto bail;
160
161 case IPATH_IB_LINKINIT:
162 if (dd->ipath_flags & IPATH_LINKINIT) {
163 ret = 0;
164 goto bail;
165 }
166 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
167 INFINIPATH_IBCC_LINKCMD_SHIFT);
168 lstate = IPATH_LINKINIT;
169 break;
170
171 case IPATH_IB_LINKARM:
172 if (dd->ipath_flags & IPATH_LINKARMED) {
173 ret = 0;
174 goto bail;
175 }
176 if (!(dd->ipath_flags &
177 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
178 ret = -EINVAL;
179 goto bail;
180 }
181 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
182 INFINIPATH_IBCC_LINKCMD_SHIFT);
183 /*
184 * Since the port can transition to ACTIVE by receiving
185 * a non VL 15 packet, wait for either state.
186 */
187 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
188 break;
189
190 case IPATH_IB_LINKACTIVE:
191 if (dd->ipath_flags & IPATH_LINKACTIVE) {
192 ret = 0;
193 goto bail;
194 }
195 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
196 ret = -EINVAL;
197 goto bail;
198 }
199 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
200 INFINIPATH_IBCC_LINKCMD_SHIFT);
201 lstate = IPATH_LINKACTIVE;
202 break;
203
204 default:
205 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
206 ret = -EINVAL;
207 goto bail;
208 }
209 ret = ipath_wait_linkstate(dd, lstate, 2000);
210
211bail:
212 return ret;
213}
214
215EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate);
216
217/**
218 * ipath_layer_set_mtu - set the MTU
219 * @dd: the infinipath device
220 * @arg: the new MTU
221 *
222 * we can handle "any" incoming size, the issue here is whether we
223 * need to restrict our outgoing size. For now, we don't do any
224 * sanity checking on this, and we don't deal with what happens to
225 * programs that are already running when the size changes.
226 * NOTE: changing the MTU will usually cause the IBC to go back to
227 * link initialize (IPATH_IBSTATE_INIT) state...
228 */
229int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
230{
231 u32 piosize;
232 int changed = 0;
233 int ret;
234
235 /*
236 * mtu is IB data payload max. It's the largest power of 2 less
237 * than piosize (or even larger, since it only really controls the
238 * largest we can receive; we can send the max of the mtu and
239 * piosize). We check that it's one of the valid IB sizes.
240 */
241 if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
242 arg != 4096) {
243 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
244 ret = -EINVAL;
245 goto bail;
246 }
247 if (dd->ipath_ibmtu == arg) {
248 ret = 0; /* same as current */
249 goto bail;
250 }
251
252 piosize = dd->ipath_ibmaxlen;
253 dd->ipath_ibmtu = arg;
254
255 if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
256 /* Only if it's not the initial value (or reset to it) */
257 if (piosize != dd->ipath_init_ibmaxlen) {
258 dd->ipath_ibmaxlen = piosize;
259 changed = 1;
260 }
261 } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
262 piosize = arg + IPATH_PIO_MAXIBHDR;
263 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
264 "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
265 arg);
266 dd->ipath_ibmaxlen = piosize;
267 changed = 1;
268 }
269
270 if (changed) {
271 /*
272 * set the IBC maxpktlength to the size of our pio
273 * buffers in words
274 */
275 u64 ibc = dd->ipath_ibcctrl;
276 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
277 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
278
279 piosize = piosize - 2 * sizeof(u32); /* ignore pbc */
280 dd->ipath_ibmaxlen = piosize;
281 piosize /= sizeof(u32); /* in words */
282 /*
283 * for ICRC, which we only send in diag test pkt mode, and
284 * we don't need to worry about that for mtu
285 */
286 piosize += 1;
287
288 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
289 dd->ipath_ibcctrl = ibc;
290 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
291 dd->ipath_ibcctrl);
292 dd->ipath_f_tidtemplate(dd);
293 }
294
295 ret = 0;
296
297bail:
298 return ret;
299}
300
301EXPORT_SYMBOL_GPL(ipath_layer_set_mtu);
302
303int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
304{
305 dd->ipath_lid = arg;
306 dd->ipath_lmc = lmc;
307
308 mutex_lock(&ipath_layer_mutex); 106 mutex_lock(&ipath_layer_mutex);
309 107
310 if (dd->ipath_layer.l_arg && layer_intr) 108 if (dd->ipath_layer.l_arg && layer_intr)
311 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID); 109 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
312 110
313 mutex_unlock(&ipath_layer_mutex); 111 mutex_unlock(&ipath_layer_mutex);
314
315 return 0;
316}
317
318EXPORT_SYMBOL_GPL(ipath_set_lid);
319
320int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
321{
322 /* XXX - need to inform anyone who cares this just happened. */
323 dd->ipath_guid = guid;
324 return 0;
325}
326
327EXPORT_SYMBOL_GPL(ipath_layer_set_guid);
328
329__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
330{
331 return dd->ipath_guid;
332}
333
334EXPORT_SYMBOL_GPL(ipath_layer_get_guid);
335
336u32 ipath_layer_get_nguid(struct ipath_devdata *dd)
337{
338 return dd->ipath_nguid;
339}
340
341EXPORT_SYMBOL_GPL(ipath_layer_get_nguid);
342
343u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
344{
345 return dd->ipath_majrev;
346} 112}
347 113
348EXPORT_SYMBOL_GPL(ipath_layer_get_majrev);
349
350u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
351{
352 return dd->ipath_minrev;
353}
354
355EXPORT_SYMBOL_GPL(ipath_layer_get_minrev);
356
357u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
358{
359 return dd->ipath_pcirev;
360}
361
362EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev);
363
364u32 ipath_layer_get_flags(struct ipath_devdata *dd)
365{
366 return dd->ipath_flags;
367}
368
369EXPORT_SYMBOL_GPL(ipath_layer_get_flags);
370
371struct device *ipath_layer_get_device(struct ipath_devdata *dd)
372{
373 return &dd->pcidev->dev;
374}
375
376EXPORT_SYMBOL_GPL(ipath_layer_get_device);
377
378u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
379{
380 return dd->ipath_deviceid;
381}
382
383EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid);
384
385u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
386{
387 return dd->ipath_vendorid;
388}
389
390EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid);
391
392u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
393{
394 return dd->ipath_lastibcstat;
395}
396
397EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat);
398
399u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
400{
401 return dd->ipath_ibmtu;
402}
403
404EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu);
405
406void ipath_layer_add(struct ipath_devdata *dd) 114void ipath_layer_add(struct ipath_devdata *dd)
407{ 115{
408 mutex_lock(&ipath_layer_mutex); 116 mutex_lock(&ipath_layer_mutex);
@@ -411,10 +119,6 @@ void ipath_layer_add(struct ipath_devdata *dd)
411 dd->ipath_layer.l_arg = 119 dd->ipath_layer.l_arg =
412 layer_add_one(dd->ipath_unit, dd); 120 layer_add_one(dd->ipath_unit, dd);
413 121
414 if (verbs_add_one)
415 dd->verbs_layer.l_arg =
416 verbs_add_one(dd->ipath_unit, dd);
417
418 mutex_unlock(&ipath_layer_mutex); 122 mutex_unlock(&ipath_layer_mutex);
419} 123}
420 124
@@ -427,11 +131,6 @@ void ipath_layer_remove(struct ipath_devdata *dd)
427 dd->ipath_layer.l_arg = NULL; 131 dd->ipath_layer.l_arg = NULL;
428 } 132 }
429 133
430 if (dd->verbs_layer.l_arg && verbs_remove_one) {
431 verbs_remove_one(dd->verbs_layer.l_arg);
432 dd->verbs_layer.l_arg = NULL;
433 }
434
435 mutex_unlock(&ipath_layer_mutex); 134 mutex_unlock(&ipath_layer_mutex);
436} 135}
437 136
@@ -463,9 +162,6 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
463 if (dd->ipath_layer.l_arg) 162 if (dd->ipath_layer.l_arg)
464 continue; 163 continue;
465 164
466 if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
467 *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
468
469 spin_unlock_irqrestore(&ipath_devs_lock, flags); 165 spin_unlock_irqrestore(&ipath_devs_lock, flags);
470 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd); 166 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
471 spin_lock_irqsave(&ipath_devs_lock, flags); 167 spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -509,107 +205,6 @@ void ipath_layer_unregister(void)
509 205
510EXPORT_SYMBOL_GPL(ipath_layer_unregister); 206EXPORT_SYMBOL_GPL(ipath_layer_unregister);
511 207
512static void __ipath_verbs_timer(unsigned long arg)
513{
514 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
515
516 /*
517 * If port 0 receive packet interrupts are not available, or
518 * can be missed, poll the receive queue
519 */
520 if (dd->ipath_flags & IPATH_POLL_RX_INTR)
521 ipath_kreceive(dd);
522
523 /* Handle verbs layer timeouts. */
524 if (dd->verbs_layer.l_arg && verbs_timer_cb)
525 verbs_timer_cb(dd->verbs_layer.l_arg);
526
527 mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);
528}
529
530/**
531 * ipath_verbs_register - verbs layer registration
532 * @l_piobufavail: callback for when PIO buffers become available
533 * @l_rcv: callback for receiving a packet
534 * @l_timer_cb: timer callback
535 * @ipath_devdata: device data structure is put here
536 */
537int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
538 void (*l_remove)(void *arg),
539 int (*l_piobufavail) (void *arg),
540 void (*l_rcv) (void *arg, void *rhdr,
541 void *data, u32 tlen),
542 void (*l_timer_cb) (void *arg))
543{
544 struct ipath_devdata *dd, *tmp;
545 unsigned long flags;
546
547 mutex_lock(&ipath_layer_mutex);
548
549 verbs_add_one = l_add;
550 verbs_remove_one = l_remove;
551 verbs_piobufavail = l_piobufavail;
552 verbs_rcv = l_rcv;
553 verbs_timer_cb = l_timer_cb;
554
555 spin_lock_irqsave(&ipath_devs_lock, flags);
556
557 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
558 if (!(dd->ipath_flags & IPATH_INITTED))
559 continue;
560
561 if (dd->verbs_layer.l_arg)
562 continue;
563
564 spin_unlock_irqrestore(&ipath_devs_lock, flags);
565 dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd);
566 spin_lock_irqsave(&ipath_devs_lock, flags);
567 }
568
569 spin_unlock_irqrestore(&ipath_devs_lock, flags);
570 mutex_unlock(&ipath_layer_mutex);
571
572 ipath_verbs_registered = 1;
573
574 return 0;
575}
576
577EXPORT_SYMBOL_GPL(ipath_verbs_register);
578
579void ipath_verbs_unregister(void)
580{
581 struct ipath_devdata *dd, *tmp;
582 unsigned long flags;
583
584 mutex_lock(&ipath_layer_mutex);
585 spin_lock_irqsave(&ipath_devs_lock, flags);
586
587 list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
588 *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
589
590 if (dd->verbs_layer.l_arg && verbs_remove_one) {
591 spin_unlock_irqrestore(&ipath_devs_lock, flags);
592 verbs_remove_one(dd->verbs_layer.l_arg);
593 spin_lock_irqsave(&ipath_devs_lock, flags);
594 dd->verbs_layer.l_arg = NULL;
595 }
596 }
597
598 spin_unlock_irqrestore(&ipath_devs_lock, flags);
599
600 verbs_add_one = NULL;
601 verbs_remove_one = NULL;
602 verbs_piobufavail = NULL;
603 verbs_rcv = NULL;
604 verbs_timer_cb = NULL;
605
606 ipath_verbs_registered = 0;
607
608 mutex_unlock(&ipath_layer_mutex);
609}
610
611EXPORT_SYMBOL_GPL(ipath_verbs_unregister);
612
613int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax) 208int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
614{ 209{
615 int ret; 210 int ret;
@@ -698,390 +293,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
698 293
699EXPORT_SYMBOL_GPL(ipath_layer_get_bcast); 294EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
700 295
701u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
702{
703 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
704}
705
706EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey);
707
708static void update_sge(struct ipath_sge_state *ss, u32 length)
709{
710 struct ipath_sge *sge = &ss->sge;
711
712 sge->vaddr += length;
713 sge->length -= length;
714 sge->sge_length -= length;
715 if (sge->sge_length == 0) {
716 if (--ss->num_sge)
717 *sge = *ss->sg_list++;
718 } else if (sge->length == 0 && sge->mr != NULL) {
719 if (++sge->n >= IPATH_SEGSZ) {
720 if (++sge->m >= sge->mr->mapsz)
721 return;
722 sge->n = 0;
723 }
724 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
725 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
726 }
727}
728
729#ifdef __LITTLE_ENDIAN
730static inline u32 get_upper_bits(u32 data, u32 shift)
731{
732 return data >> shift;
733}
734
735static inline u32 set_upper_bits(u32 data, u32 shift)
736{
737 return data << shift;
738}
739
740static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
741{
742 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
743 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
744 return data;
745}
746#else
747static inline u32 get_upper_bits(u32 data, u32 shift)
748{
749 return data << shift;
750}
751
752static inline u32 set_upper_bits(u32 data, u32 shift)
753{
754 return data >> shift;
755}
756
757static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
758{
759 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
760 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
761 return data;
762}
763#endif
764
765static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
766 u32 length)
767{
768 u32 extra = 0;
769 u32 data = 0;
770 u32 last;
771
772 while (1) {
773 u32 len = ss->sge.length;
774 u32 off;
775
776 BUG_ON(len == 0);
777 if (len > length)
778 len = length;
779 if (len > ss->sge.sge_length)
780 len = ss->sge.sge_length;
781 /* If the source address is not aligned, try to align it. */
782 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
783 if (off) {
784 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
785 ~(sizeof(u32) - 1));
786 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
787 u32 y;
788
789 y = sizeof(u32) - off;
790 if (len > y)
791 len = y;
792 if (len + extra >= sizeof(u32)) {
793 data |= set_upper_bits(v, extra *
794 BITS_PER_BYTE);
795 len = sizeof(u32) - extra;
796 if (len == length) {
797 last = data;
798 break;
799 }
800 __raw_writel(data, piobuf);
801 piobuf++;
802 extra = 0;
803 data = 0;
804 } else {
805 /* Clear unused upper bytes */
806 data |= clear_upper_bytes(v, len, extra);
807 if (len == length) {
808 last = data;
809 break;
810 }
811 extra += len;
812 }
813 } else if (extra) {
814 /* Source address is aligned. */
815 u32 *addr = (u32 *) ss->sge.vaddr;
816 int shift = extra * BITS_PER_BYTE;
817 int ushift = 32 - shift;
818 u32 l = len;
819
820 while (l >= sizeof(u32)) {
821 u32 v = *addr;
822
823 data |= set_upper_bits(v, shift);
824 __raw_writel(data, piobuf);
825 data = get_upper_bits(v, ushift);
826 piobuf++;
827 addr++;
828 l -= sizeof(u32);
829 }
830 /*
831 * We still have 'extra' number of bytes leftover.
832 */
833 if (l) {
834 u32 v = *addr;
835
836 if (l + extra >= sizeof(u32)) {
837 data |= set_upper_bits(v, shift);
838 len -= l + extra - sizeof(u32);
839 if (len == length) {
840 last = data;
841 break;
842 }
843 __raw_writel(data, piobuf);
844 piobuf++;
845 extra = 0;
846 data = 0;
847 } else {
848 /* Clear unused upper bytes */
849 data |= clear_upper_bytes(v, l,
850 extra);
851 if (len == length) {
852 last = data;
853 break;
854 }
855 extra += l;
856 }
857 } else if (len == length) {
858 last = data;
859 break;
860 }
861 } else if (len == length) {
862 u32 w;
863
864 /*
865 * Need to round up for the last dword in the
866 * packet.
867 */
868 w = (len + 3) >> 2;
869 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
870 piobuf += w - 1;
871 last = ((u32 *) ss->sge.vaddr)[w - 1];
872 break;
873 } else {
874 u32 w = len >> 2;
875
876 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
877 piobuf += w;
878
879 extra = len & (sizeof(u32) - 1);
880 if (extra) {
881 u32 v = ((u32 *) ss->sge.vaddr)[w];
882
883 /* Clear unused upper bytes */
884 data = clear_upper_bytes(v, extra, 0);
885 }
886 }
887 update_sge(ss, len);
888 length -= len;
889 }
890 /* Update address before sending packet. */
891 update_sge(ss, length);
892 /* must flush early everything before trigger word */
893 ipath_flush_wc();
894 __raw_writel(last, piobuf);
895 /* be sure trigger word is written */
896 ipath_flush_wc();
897}
898
899/**
900 * ipath_verbs_send - send a packet from the verbs layer
901 * @dd: the infinipath device
902 * @hdrwords: the number of words in the header
903 * @hdr: the packet header
904 * @len: the length of the packet in bytes
905 * @ss: the SGE to send
906 *
907 * This is like ipath_sma_send_pkt() in that we need to be able to send
908 * packets after the chip is initialized (MADs) but also like
909 * ipath_layer_send_hdr() since it's used by the verbs layer.
910 */
911int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
912 u32 *hdr, u32 len, struct ipath_sge_state *ss)
913{
914 u32 __iomem *piobuf;
915 u32 plen;
916 int ret;
917
918 /* +1 is for the qword padding of pbc */
919 plen = hdrwords + ((len + 3) >> 2) + 1;
920 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
921 ipath_dbg("packet len 0x%x too long, failing\n", plen);
922 ret = -EINVAL;
923 goto bail;
924 }
925
926 /* Get a PIO buffer to use. */
927 piobuf = ipath_getpiobuf(dd, NULL);
928 if (unlikely(piobuf == NULL)) {
929 ret = -EBUSY;
930 goto bail;
931 }
932
933 /*
934 * Write len to control qword, no flags.
935 * We have to flush after the PBC for correctness on some cpus
936 * or WC buffer can be written out of order.
937 */
938 writeq(plen, piobuf);
939 ipath_flush_wc();
940 piobuf += 2;
941 if (len == 0) {
942 /*
943 * If there is just the header portion, must flush before
944 * writing last word of header for correctness, and after
945 * the last header word (trigger word).
946 */
947 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
948 ipath_flush_wc();
949 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
950 ipath_flush_wc();
951 ret = 0;
952 goto bail;
953 }
954
955 __iowrite32_copy(piobuf, hdr, hdrwords);
956 piobuf += hdrwords;
957
958 /* The common case is aligned and contained in one segment. */
959 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
960 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
961 u32 w;
962 u32 *addr = (u32 *) ss->sge.vaddr;
963
964 /* Update address before sending packet. */
965 update_sge(ss, len);
966 /* Need to round up for the last dword in the packet. */
967 w = (len + 3) >> 2;
968 __iowrite32_copy(piobuf, addr, w - 1);
969 /* must flush early everything before trigger word */
970 ipath_flush_wc();
971 __raw_writel(addr[w - 1], piobuf + w - 1);
972 /* be sure trigger word is written */
973 ipath_flush_wc();
974 ret = 0;
975 goto bail;
976 }
977 copy_io(piobuf, ss, len);
978 ret = 0;
979
980bail:
981 return ret;
982}
983
984EXPORT_SYMBOL_GPL(ipath_verbs_send);
985
986int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
987 u64 *rwords, u64 *spkts, u64 *rpkts,
988 u64 *xmit_wait)
989{
990 int ret;
991
992 if (!(dd->ipath_flags & IPATH_INITTED)) {
993 /* no hardware, freeze, etc. */
994 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
995 ret = -EINVAL;
996 goto bail;
997 }
998 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
999 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1000 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1001 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1002 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
1003
1004 ret = 0;
1005
1006bail:
1007 return ret;
1008}
1009
1010EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters);
1011
1012/**
1013 * ipath_layer_get_counters - get various chip counters
1014 * @dd: the infinipath device
1015 * @cntrs: counters are placed here
1016 *
1017 * Return the counters needed by recv_pma_get_portcounters().
1018 */
1019int ipath_layer_get_counters(struct ipath_devdata *dd,
1020 struct ipath_layer_counters *cntrs)
1021{
1022 int ret;
1023
1024 if (!(dd->ipath_flags & IPATH_INITTED)) {
1025 /* no hardware, freeze, etc. */
1026 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
1027 ret = -EINVAL;
1028 goto bail;
1029 }
1030 cntrs->symbol_error_counter =
1031 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
1032 cntrs->link_error_recovery_counter =
1033 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
1034 /*
1035 * The link downed counter counts when the other side downs the
1036 * connection. We add in the number of times we downed the link
1037 * due to local link integrity errors to compensate.
1038 */
1039 cntrs->link_downed_counter =
1040 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
1041 cntrs->port_rcv_errors =
1042 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
1043 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
1044 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
1045 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
1046 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
1047 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
1048 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
1049 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
1050 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
1051 cntrs->port_rcv_remphys_errors =
1052 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
1053 cntrs->port_xmit_discards =
1054 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
1055 cntrs->port_xmit_data =
1056 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
1057 cntrs->port_rcv_data =
1058 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
1059 cntrs->port_xmit_packets =
1060 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
1061 cntrs->port_rcv_packets =
1062 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
1063 cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
1064 cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
1065
1066 ret = 0;
1067
1068bail:
1069 return ret;
1070}
1071
1072EXPORT_SYMBOL_GPL(ipath_layer_get_counters);
1073
1074int ipath_layer_want_buffer(struct ipath_devdata *dd)
1075{
1076 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
1077 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1078 dd->ipath_sendctrl);
1079
1080 return 0;
1081}
1082
1083EXPORT_SYMBOL_GPL(ipath_layer_want_buffer);
1084
1085int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr) 296int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
1086{ 297{
1087 int ret = 0; 298 int ret = 0;
@@ -1153,389 +364,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
1153} 364}
1154 365
1155EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int); 366EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
1156
1157int ipath_layer_enable_timer(struct ipath_devdata *dd)
1158{
1159 /*
1160 * HT-400 has a design flaw where the chip and kernel idea
1161 * of the tail register don't always agree, and therefore we won't
1162 * get an interrupt on the next packet received.
1163 * If the board supports per packet receive interrupts, use it.
1164 * Otherwise, the timer function periodically checks for packets
1165 * to cover this case.
1166 * Either way, the timer is needed for verbs layer related
1167 * processing.
1168 */
1169 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1170 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1171 0x2074076542310ULL);
1172 /* Enable GPIO bit 2 interrupt */
1173 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1174 (u64) (1 << 2));
1175 }
1176
1177 init_timer(&dd->verbs_layer.l_timer);
1178 dd->verbs_layer.l_timer.function = __ipath_verbs_timer;
1179 dd->verbs_layer.l_timer.data = (unsigned long)dd;
1180 dd->verbs_layer.l_timer.expires = jiffies + 1;
1181 add_timer(&dd->verbs_layer.l_timer);
1182
1183 return 0;
1184}
1185
1186EXPORT_SYMBOL_GPL(ipath_layer_enable_timer);
1187
1188int ipath_layer_disable_timer(struct ipath_devdata *dd)
1189{
1190 /* Disable GPIO bit 2 interrupt */
1191 if (dd->ipath_flags & IPATH_GPIO_INTR)
1192 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1193
1194 del_timer_sync(&dd->verbs_layer.l_timer);
1195
1196 return 0;
1197}
1198
1199EXPORT_SYMBOL_GPL(ipath_layer_disable_timer);
1200
1201/**
1202 * ipath_layer_set_verbs_flags - set the verbs layer flags
1203 * @dd: the infinipath device
1204 * @flags: the flags to set
1205 */
1206int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1207{
1208 struct ipath_devdata *ss;
1209 unsigned long lflags;
1210
1211 spin_lock_irqsave(&ipath_devs_lock, lflags);
1212
1213 list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1214 if (!(ss->ipath_flags & IPATH_INITTED))
1215 continue;
1216 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1217 !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1218 *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1219 else
1220 *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1221 }
1222
1223 spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1224
1225 return 0;
1226}
1227
1228EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags);
1229
1230/**
1231 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1232 * @dd: the infinipath device
1233 */
1234unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1235{
1236 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1237}
1238
1239EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys);
1240
1241/**
1242 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1243 * @dd: the infinipath device
1244 * @index: the PKEY index
1245 */
1246unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1247{
1248 unsigned ret;
1249
1250 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1251 ret = 0;
1252 else
1253 ret = dd->ipath_pd[0]->port_pkeys[index];
1254
1255 return ret;
1256}
1257
1258EXPORT_SYMBOL_GPL(ipath_layer_get_pkey);
1259
1260/**
1261 * ipath_layer_get_pkeys - return the PKEY table for port 0
1262 * @dd: the infinipath device
1263 * @pkeys: the pkey table is placed here
1264 */
1265int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1266{
1267 struct ipath_portdata *pd = dd->ipath_pd[0];
1268
1269 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1270
1271 return 0;
1272}
1273
1274EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys);
1275
1276/**
1277 * rm_pkey - decrement the reference count for the given PKEY
1278 * @dd: the infinipath device
1279 * @key: the PKEY index
1280 *
1281 * Return true if this was the last reference and the hardware table entry
1282 * needs to be changed.
1283 */
1284static int rm_pkey(struct ipath_devdata *dd, u16 key)
1285{
1286 int i;
1287 int ret;
1288
1289 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1290 if (dd->ipath_pkeys[i] != key)
1291 continue;
1292 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1293 dd->ipath_pkeys[i] = 0;
1294 ret = 1;
1295 goto bail;
1296 }
1297 break;
1298 }
1299
1300 ret = 0;
1301
1302bail:
1303 return ret;
1304}
1305
1306/**
1307 * add_pkey - add the given PKEY to the hardware table
1308 * @dd: the infinipath device
1309 * @key: the PKEY
1310 *
1311 * Return an error code if unable to add the entry, zero if no change,
1312 * or 1 if the hardware PKEY register needs to be updated.
1313 */
1314static int add_pkey(struct ipath_devdata *dd, u16 key)
1315{
1316 int i;
1317 u16 lkey = key & 0x7FFF;
1318 int any = 0;
1319 int ret;
1320
1321 if (lkey == 0x7FFF) {
1322 ret = 0;
1323 goto bail;
1324 }
1325
1326 /* Look for an empty slot or a matching PKEY. */
1327 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1328 if (!dd->ipath_pkeys[i]) {
1329 any++;
1330 continue;
1331 }
1332 /* If it matches exactly, try to increment the ref count */
1333 if (dd->ipath_pkeys[i] == key) {
1334 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1335 ret = 0;
1336 goto bail;
1337 }
1338 /* Lost the race. Look for an empty slot below. */
1339 atomic_dec(&dd->ipath_pkeyrefs[i]);
1340 any++;
1341 }
1342 /*
1343 * It makes no sense to have both the limited and unlimited
1344 * PKEY set at the same time since the unlimited one will
1345 * disable the limited one.
1346 */
1347 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1348 ret = -EEXIST;
1349 goto bail;
1350 }
1351 }
1352 if (!any) {
1353 ret = -EBUSY;
1354 goto bail;
1355 }
1356 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1357 if (!dd->ipath_pkeys[i] &&
1358 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1359 /* for ipathstats, etc. */
1360 ipath_stats.sps_pkeys[i] = lkey;
1361 dd->ipath_pkeys[i] = key;
1362 ret = 1;
1363 goto bail;
1364 }
1365 }
1366 ret = -EBUSY;
1367
1368bail:
1369 return ret;
1370}
1371
1372/**
1373 * ipath_layer_set_pkeys - set the PKEY table for port 0
1374 * @dd: the infinipath device
1375 * @pkeys: the PKEY table
1376 */
1377int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1378{
1379 struct ipath_portdata *pd;
1380 int i;
1381 int changed = 0;
1382
1383 pd = dd->ipath_pd[0];
1384
1385 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1386 u16 key = pkeys[i];
1387 u16 okey = pd->port_pkeys[i];
1388
1389 if (key == okey)
1390 continue;
1391 /*
1392 * The value of this PKEY table entry is changing.
1393 * Remove the old entry in the hardware's array of PKEYs.
1394 */
1395 if (okey & 0x7FFF)
1396 changed |= rm_pkey(dd, okey);
1397 if (key & 0x7FFF) {
1398 int ret = add_pkey(dd, key);
1399
1400 if (ret < 0)
1401 key = 0;
1402 else
1403 changed |= ret;
1404 }
1405 pd->port_pkeys[i] = key;
1406 }
1407 if (changed) {
1408 u64 pkey;
1409
1410 pkey = (u64) dd->ipath_pkeys[0] |
1411 ((u64) dd->ipath_pkeys[1] << 16) |
1412 ((u64) dd->ipath_pkeys[2] << 32) |
1413 ((u64) dd->ipath_pkeys[3] << 48);
1414 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1415 (unsigned long long) pkey);
1416 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1417 pkey);
1418 }
1419 return 0;
1420}
1421
1422EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys);
1423
1424/**
1425 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1426 * @dd: the infinipath device
1427 *
1428 * Returns zero if the default is POLL, 1 if the default is SLEEP.
1429 */
1430int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1431{
1432 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1433}
1434
1435EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate);
1436
1437/**
1438 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1439 * @dd: the infinipath device
1440 * @sleep: the new state
1441 *
1442 * Note that this will only take effect when the link state changes.
1443 */
1444int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1445 int sleep)
1446{
1447 if (sleep)
1448 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1449 else
1450 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1451 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1452 dd->ipath_ibcctrl);
1453 return 0;
1454}
1455
1456EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate);
1457
1458int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1459{
1460 return (dd->ipath_ibcctrl >>
1461 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1462 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1463}
1464
1465EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold);
1466
1467/**
1468 * ipath_layer_set_phyerrthreshold - set the physical error threshold
1469 * @dd: the infinipath device
1470 * @n: the new threshold
1471 *
1472 * Note that this will only take effect when the link state changes.
1473 */
1474int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1475{
1476 unsigned v;
1477
1478 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1479 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1480 if (v != n) {
1481 dd->ipath_ibcctrl &=
1482 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1483 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1484 dd->ipath_ibcctrl |=
1485 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1486 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1487 dd->ipath_ibcctrl);
1488 }
1489 return 0;
1490}
1491
1492EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold);
1493
1494int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1495{
1496 return (dd->ipath_ibcctrl >>
1497 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1498 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1499}
1500
1501EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold);
1502
1503/**
1504 * ipath_layer_set_overrunthreshold - set the overrun threshold
1505 * @dd: the infinipath device
1506 * @n: the new threshold
1507 *
1508 * Note that this will only take effect when the link state changes.
1509 */
1510int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1511{
1512 unsigned v;
1513
1514 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1515 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1516 if (v != n) {
1517 dd->ipath_ibcctrl &=
1518 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1519 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1520 dd->ipath_ibcctrl |=
1521 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1522 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1523 dd->ipath_ibcctrl);
1524 }
1525 return 0;
1526}
1527
1528EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold);
1529
1530int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1531 size_t namelen)
1532{
1533 return dd->ipath_f_get_boardname(dd, name, namelen);
1534}
1535EXPORT_SYMBOL_GPL(ipath_layer_get_boardname);
1536
1537u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1538{
1539 return dd->ipath_rcvhdrentsize;
1540}
1541EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize);
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h
index 71485096fcac..3854a4eae684 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.h
+++ b/drivers/infiniband/hw/ipath/ipath_layer.h
@@ -40,73 +40,9 @@
40 */ 40 */
41 41
42struct sk_buff; 42struct sk_buff;
43struct ipath_sge_state;
44struct ipath_devdata; 43struct ipath_devdata;
45struct ether_header; 44struct ether_header;
46 45
47struct ipath_layer_counters {
48 u64 symbol_error_counter;
49 u64 link_error_recovery_counter;
50 u64 link_downed_counter;
51 u64 port_rcv_errors;
52 u64 port_rcv_remphys_errors;
53 u64 port_xmit_discards;
54 u64 port_xmit_data;
55 u64 port_rcv_data;
56 u64 port_xmit_packets;
57 u64 port_rcv_packets;
58 u32 local_link_integrity_errors;
59 u32 excessive_buffer_overrun_errors;
60};
61
62/*
63 * A segment is a linear region of low physical memory.
64 * XXX Maybe we should use phys addr here and kmap()/kunmap().
65 * Used by the verbs layer.
66 */
67struct ipath_seg {
68 void *vaddr;
69 size_t length;
70};
71
72/* The number of ipath_segs that fit in a page. */
73#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
74
75struct ipath_segarray {
76 struct ipath_seg segs[IPATH_SEGSZ];
77};
78
79struct ipath_mregion {
80 u64 user_base; /* User's address for this region */
81 u64 iova; /* IB start address of this region */
82 size_t length;
83 u32 lkey;
84 u32 offset; /* offset (bytes) to start of region */
85 int access_flags;
86 u32 max_segs; /* number of ipath_segs in all the arrays */
87 u32 mapsz; /* size of the map array */
88 struct ipath_segarray *map[0]; /* the segments */
89};
90
91/*
92 * These keep track of the copy progress within a memory region.
93 * Used by the verbs layer.
94 */
95struct ipath_sge {
96 struct ipath_mregion *mr;
97 void *vaddr; /* current pointer into the segment */
98 u32 sge_length; /* length of the SGE */
99 u32 length; /* remaining length of the segment */
100 u16 m; /* current index: mr->map[m] */
101 u16 n; /* current index: mr->map[m]->segs[n] */
102};
103
104struct ipath_sge_state {
105 struct ipath_sge *sg_list; /* next SGE to be used if any */
106 struct ipath_sge sge; /* progress state for the current SGE */
107 u8 num_sge;
108};
109
110int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), 46int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
111 void (*l_remove)(void *), 47 void (*l_remove)(void *),
112 int (*l_intr)(void *, u32), 48 int (*l_intr)(void *, u32),
@@ -114,62 +50,14 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
114 struct sk_buff *), 50 struct sk_buff *),
115 u16 rcv_opcode, 51 u16 rcv_opcode,
116 int (*l_rcv_lid)(void *, void *)); 52 int (*l_rcv_lid)(void *, void *));
117int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *),
118 void (*l_remove)(void *arg),
119 int (*l_piobufavail)(void *arg),
120 void (*l_rcv)(void *arg, void *rhdr,
121 void *data, u32 tlen),
122 void (*l_timer_cb)(void *arg));
123void ipath_layer_unregister(void); 53void ipath_layer_unregister(void);
124void ipath_verbs_unregister(void);
125int ipath_layer_open(struct ipath_devdata *, u32 * pktmax); 54int ipath_layer_open(struct ipath_devdata *, u32 * pktmax);
126u16 ipath_layer_get_lid(struct ipath_devdata *dd); 55u16 ipath_layer_get_lid(struct ipath_devdata *dd);
127int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *); 56int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *);
128u16 ipath_layer_get_bcast(struct ipath_devdata *dd); 57u16 ipath_layer_get_bcast(struct ipath_devdata *dd);
129u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd);
130int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state);
131int ipath_layer_set_mtu(struct ipath_devdata *, u16);
132int ipath_set_lid(struct ipath_devdata *, u32, u8);
133int ipath_layer_send_hdr(struct ipath_devdata *dd, 58int ipath_layer_send_hdr(struct ipath_devdata *dd,
134 struct ether_header *hdr); 59 struct ether_header *hdr);
135int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
136 u32 * hdr, u32 len, struct ipath_sge_state *ss);
137int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd); 60int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd);
138int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
139 size_t namelen);
140int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
141 u64 *rwords, u64 *spkts, u64 *rpkts,
142 u64 *xmit_wait);
143int ipath_layer_get_counters(struct ipath_devdata *dd,
144 struct ipath_layer_counters *cntrs);
145int ipath_layer_want_buffer(struct ipath_devdata *dd);
146int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid);
147__be64 ipath_layer_get_guid(struct ipath_devdata *);
148u32 ipath_layer_get_nguid(struct ipath_devdata *);
149u32 ipath_layer_get_majrev(struct ipath_devdata *);
150u32 ipath_layer_get_minrev(struct ipath_devdata *);
151u32 ipath_layer_get_pcirev(struct ipath_devdata *);
152u32 ipath_layer_get_flags(struct ipath_devdata *dd);
153struct device *ipath_layer_get_device(struct ipath_devdata *dd);
154u16 ipath_layer_get_deviceid(struct ipath_devdata *dd);
155u32 ipath_layer_get_vendorid(struct ipath_devdata *);
156u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd);
157u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd);
158int ipath_layer_enable_timer(struct ipath_devdata *dd);
159int ipath_layer_disable_timer(struct ipath_devdata *dd);
160int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags);
161unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd);
162unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index);
163int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys);
164int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys);
165int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd);
166int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
167 int sleep);
168int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd);
169int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n);
170int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd);
171int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n);
172u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
173 61
174/* ipath_ether interrupt values */ 62/* ipath_ether interrupt values */
175#define IPATH_LAYER_INT_IF_UP 0x2 63#define IPATH_LAYER_INT_IF_UP 0x2
@@ -178,9 +66,6 @@ u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd);
178#define IPATH_LAYER_INT_SEND_CONTINUE 0x10 66#define IPATH_LAYER_INT_SEND_CONTINUE 0x10
179#define IPATH_LAYER_INT_BCAST 0x40 67#define IPATH_LAYER_INT_BCAST 0x40
180 68
181/* _verbs_layer.l_flags */
182#define IPATH_VERBS_KERNEL_SMA 0x1
183
184extern unsigned ipath_debug; /* debugging bit mask */ 69extern unsigned ipath_debug; /* debugging bit mask */
185 70
186#endif /* _IPATH_LAYER_H */ 71#endif /* _IPATH_LAYER_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index d3402341b7d0..72d1db89db8f 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -101,15 +101,15 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp,
101 nip->num_ports = ibdev->phys_port_cnt; 101 nip->num_ports = ibdev->phys_port_cnt;
102 /* This is already in network order */ 102 /* This is already in network order */
103 nip->sys_guid = to_idev(ibdev)->sys_image_guid; 103 nip->sys_guid = to_idev(ibdev)->sys_image_guid;
104 nip->node_guid = ipath_layer_get_guid(dd); 104 nip->node_guid = dd->ipath_guid;
105 nip->port_guid = nip->sys_guid; 105 nip->port_guid = nip->sys_guid;
106 nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd)); 106 nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
107 nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd)); 107 nip->device_id = cpu_to_be16(dd->ipath_deviceid);
108 majrev = ipath_layer_get_majrev(dd); 108 majrev = dd->ipath_majrev;
109 minrev = ipath_layer_get_minrev(dd); 109 minrev = dd->ipath_minrev;
110 nip->revision = cpu_to_be32((majrev << 16) | minrev); 110 nip->revision = cpu_to_be32((majrev << 16) | minrev);
111 nip->local_port_num = port; 111 nip->local_port_num = port;
112 vendor = ipath_layer_get_vendorid(dd); 112 vendor = dd->ipath_vendorid;
113 nip->vendor_id[0] = 0; 113 nip->vendor_id[0] = 0;
114 nip->vendor_id[1] = vendor >> 8; 114 nip->vendor_id[1] = vendor >> 8;
115 nip->vendor_id[2] = vendor; 115 nip->vendor_id[2] = vendor;
@@ -133,13 +133,89 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
133 */ 133 */
134 if (startgx == 0) 134 if (startgx == 0)
135 /* The first is a copy of the read-only HW GUID. */ 135 /* The first is a copy of the read-only HW GUID. */
136 *p = ipath_layer_get_guid(to_idev(ibdev)->dd); 136 *p = to_idev(ibdev)->dd->ipath_guid;
137 else 137 else
138 smp->status |= IB_SMP_INVALID_FIELD; 138 smp->status |= IB_SMP_INVALID_FIELD;
139 139
140 return reply(smp); 140 return reply(smp);
141} 141}
142 142
143
144static int get_overrunthreshold(struct ipath_devdata *dd)
145{
146 return (dd->ipath_ibcctrl >>
147 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
148 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
149}
150
151/**
152 * set_overrunthreshold - set the overrun threshold
153 * @dd: the infinipath device
154 * @n: the new threshold
155 *
156 * Note that this will only take effect when the link state changes.
157 */
158static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
159{
160 unsigned v;
161
162 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
163 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
164 if (v != n) {
165 dd->ipath_ibcctrl &=
166 ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
167 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
168 dd->ipath_ibcctrl |=
169 (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
170 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
171 dd->ipath_ibcctrl);
172 }
173 return 0;
174}
175
176static int get_phyerrthreshold(struct ipath_devdata *dd)
177{
178 return (dd->ipath_ibcctrl >>
179 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
180 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
181}
182
183/**
184 * set_phyerrthreshold - set the physical error threshold
185 * @dd: the infinipath device
186 * @n: the new threshold
187 *
188 * Note that this will only take effect when the link state changes.
189 */
190static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
191{
192 unsigned v;
193
194 v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
195 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
196 if (v != n) {
197 dd->ipath_ibcctrl &=
198 ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
199 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
200 dd->ipath_ibcctrl |=
201 (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
202 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
203 dd->ipath_ibcctrl);
204 }
205 return 0;
206}
207
208/**
209 * get_linkdowndefaultstate - get the default linkdown state
210 * @dd: the infinipath device
211 *
212 * Returns zero if the default is POLL, 1 if the default is SLEEP.
213 */
214static int get_linkdowndefaultstate(struct ipath_devdata *dd)
215{
216 return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
217}
218
143static int recv_subn_get_portinfo(struct ib_smp *smp, 219static int recv_subn_get_portinfo(struct ib_smp *smp,
144 struct ib_device *ibdev, u8 port) 220 struct ib_device *ibdev, u8 port)
145{ 221{
@@ -166,7 +242,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
166 (dev->mkeyprot_resv_lmc >> 6) == 0) 242 (dev->mkeyprot_resv_lmc >> 6) == 0)
167 pip->mkey = dev->mkey; 243 pip->mkey = dev->mkey;
168 pip->gid_prefix = dev->gid_prefix; 244 pip->gid_prefix = dev->gid_prefix;
169 lid = ipath_layer_get_lid(dev->dd); 245 lid = dev->dd->ipath_lid;
170 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; 246 pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
171 pip->sm_lid = cpu_to_be16(dev->sm_lid); 247 pip->sm_lid = cpu_to_be16(dev->sm_lid);
172 pip->cap_mask = cpu_to_be32(dev->port_cap_flags); 248 pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
@@ -177,14 +253,14 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
177 pip->link_width_supported = 3; /* 1x or 4x */ 253 pip->link_width_supported = 3; /* 1x or 4x */
178 pip->link_width_active = 2; /* 4x */ 254 pip->link_width_active = 2; /* 4x */
179 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ 255 pip->linkspeed_portstate = 0x10; /* 2.5Gbps */
180 ibcstat = ipath_layer_get_lastibcstat(dev->dd); 256 ibcstat = dev->dd->ipath_lastibcstat;
181 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; 257 pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
182 pip->portphysstate_linkdown = 258 pip->portphysstate_linkdown =
183 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | 259 (ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
184 (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2); 260 (get_linkdowndefaultstate(dev->dd) ? 1 : 2);
185 pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc; 261 pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc;
186 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ 262 pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */
187 switch (ipath_layer_get_ibmtu(dev->dd)) { 263 switch (dev->dd->ipath_ibmtu) {
188 case 4096: 264 case 4096:
189 mtu = IB_MTU_4096; 265 mtu = IB_MTU_4096;
190 break; 266 break;
@@ -217,7 +293,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
217 pip->mkey_violations = cpu_to_be16(dev->mkey_violations); 293 pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
218 /* P_KeyViolations are counted by hardware. */ 294 /* P_KeyViolations are counted by hardware. */
219 pip->pkey_violations = 295 pip->pkey_violations =
220 cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) - 296 cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
221 dev->z_pkey_violations) & 0xFFFF); 297 dev->z_pkey_violations) & 0xFFFF);
222 pip->qkey_violations = cpu_to_be16(dev->qkey_violations); 298 pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
223 /* Only the hardware GUID is supported for now */ 299 /* Only the hardware GUID is supported for now */
@@ -226,8 +302,8 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
226 /* 32.768 usec. response time (guessing) */ 302 /* 32.768 usec. response time (guessing) */
227 pip->resv_resptimevalue = 3; 303 pip->resv_resptimevalue = 3;
228 pip->localphyerrors_overrunerrors = 304 pip->localphyerrors_overrunerrors =
229 (ipath_layer_get_phyerrthreshold(dev->dd) << 4) | 305 (get_phyerrthreshold(dev->dd) << 4) |
230 ipath_layer_get_overrunthreshold(dev->dd); 306 get_overrunthreshold(dev->dd);
231 /* pip->max_credit_hint; */ 307 /* pip->max_credit_hint; */
232 /* pip->link_roundtrip_latency[3]; */ 308 /* pip->link_roundtrip_latency[3]; */
233 309
@@ -237,6 +313,20 @@ bail:
237 return ret; 313 return ret;
238} 314}
239 315
316/**
317 * get_pkeys - return the PKEY table for port 0
318 * @dd: the infinipath device
319 * @pkeys: the pkey table is placed here
320 */
321static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
322{
323 struct ipath_portdata *pd = dd->ipath_pd[0];
324
325 memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
326
327 return 0;
328}
329
240static int recv_subn_get_pkeytable(struct ib_smp *smp, 330static int recv_subn_get_pkeytable(struct ib_smp *smp,
241 struct ib_device *ibdev) 331 struct ib_device *ibdev)
242{ 332{
@@ -249,9 +339,9 @@ static int recv_subn_get_pkeytable(struct ib_smp *smp,
249 memset(smp->data, 0, sizeof(smp->data)); 339 memset(smp->data, 0, sizeof(smp->data));
250 if (startpx == 0) { 340 if (startpx == 0) {
251 struct ipath_ibdev *dev = to_idev(ibdev); 341 struct ipath_ibdev *dev = to_idev(ibdev);
252 unsigned i, n = ipath_layer_get_npkeys(dev->dd); 342 unsigned i, n = ipath_get_npkeys(dev->dd);
253 343
254 ipath_layer_get_pkeys(dev->dd, p); 344 get_pkeys(dev->dd, p);
255 345
256 for (i = 0; i < n; i++) 346 for (i = 0; i < n; i++)
257 q[i] = cpu_to_be16(p[i]); 347 q[i] = cpu_to_be16(p[i]);
@@ -269,6 +359,24 @@ static int recv_subn_set_guidinfo(struct ib_smp *smp,
269} 359}
270 360
271/** 361/**
362 * set_linkdowndefaultstate - set the default linkdown state
363 * @dd: the infinipath device
364 * @sleep: the new state
365 *
366 * Note that this will only take effect when the link state changes.
367 */
368static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
369{
370 if (sleep)
371 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
372 else
373 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
374 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
375 dd->ipath_ibcctrl);
376 return 0;
377}
378
379/**
272 * recv_subn_set_portinfo - set port information 380 * recv_subn_set_portinfo - set port information
273 * @smp: the incoming SM packet 381 * @smp: the incoming SM packet
274 * @ibdev: the infiniband device 382 * @ibdev: the infiniband device
@@ -290,7 +398,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
290 u8 state; 398 u8 state;
291 u16 lstate; 399 u16 lstate;
292 u32 mtu; 400 u32 mtu;
293 int ret; 401 int ret, ore;
294 402
295 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) 403 if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
296 goto err; 404 goto err;
@@ -304,7 +412,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
304 dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); 412 dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
305 413
306 lid = be16_to_cpu(pip->lid); 414 lid = be16_to_cpu(pip->lid);
307 if (lid != ipath_layer_get_lid(dev->dd)) { 415 if (lid != dev->dd->ipath_lid) {
308 /* Must be a valid unicast LID address. */ 416 /* Must be a valid unicast LID address. */
309 if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) 417 if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
310 goto err; 418 goto err;
@@ -342,11 +450,11 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
342 case 0: /* NOP */ 450 case 0: /* NOP */
343 break; 451 break;
344 case 1: /* SLEEP */ 452 case 1: /* SLEEP */
345 if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1)) 453 if (set_linkdowndefaultstate(dev->dd, 1))
346 goto err; 454 goto err;
347 break; 455 break;
348 case 2: /* POLL */ 456 case 2: /* POLL */
349 if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0)) 457 if (set_linkdowndefaultstate(dev->dd, 0))
350 goto err; 458 goto err;
351 break; 459 break;
352 default: 460 default:
@@ -376,7 +484,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
376 /* XXX We have already partially updated our state! */ 484 /* XXX We have already partially updated our state! */
377 goto err; 485 goto err;
378 } 486 }
379 ipath_layer_set_mtu(dev->dd, mtu); 487 ipath_set_mtu(dev->dd, mtu);
380 488
381 dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF; 489 dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
382 490
@@ -392,20 +500,16 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
392 * later. 500 * later.
393 */ 501 */
394 if (pip->pkey_violations == 0) 502 if (pip->pkey_violations == 0)
395 dev->z_pkey_violations = 503 dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd);
396 ipath_layer_get_cr_errpkey(dev->dd);
397 504
398 if (pip->qkey_violations == 0) 505 if (pip->qkey_violations == 0)
399 dev->qkey_violations = 0; 506 dev->qkey_violations = 0;
400 507
401 if (ipath_layer_set_phyerrthreshold( 508 ore = pip->localphyerrors_overrunerrors;
402 dev->dd, 509 if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF))
403 (pip->localphyerrors_overrunerrors >> 4) & 0xF))
404 goto err; 510 goto err;
405 511
406 if (ipath_layer_set_overrunthreshold( 512 if (set_overrunthreshold(dev->dd, (ore & 0xF)))
407 dev->dd,
408 (pip->localphyerrors_overrunerrors & 0xF)))
409 goto err; 513 goto err;
410 514
411 dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; 515 dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
@@ -423,7 +527,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
423 * is down or is being set to down. 527 * is down or is being set to down.
424 */ 528 */
425 state = pip->linkspeed_portstate & 0xF; 529 state = pip->linkspeed_portstate & 0xF;
426 flags = ipath_layer_get_flags(dev->dd); 530 flags = dev->dd->ipath_flags;
427 lstate = (pip->portphysstate_linkdown >> 4) & 0xF; 531 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
428 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) 532 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
429 goto err; 533 goto err;
@@ -439,7 +543,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
439 /* FALLTHROUGH */ 543 /* FALLTHROUGH */
440 case IB_PORT_DOWN: 544 case IB_PORT_DOWN:
441 if (lstate == 0) 545 if (lstate == 0)
442 if (ipath_layer_get_linkdowndefaultstate(dev->dd)) 546 if (get_linkdowndefaultstate(dev->dd))
443 lstate = IPATH_IB_LINKDOWN_SLEEP; 547 lstate = IPATH_IB_LINKDOWN_SLEEP;
444 else 548 else
445 lstate = IPATH_IB_LINKDOWN; 549 lstate = IPATH_IB_LINKDOWN;
@@ -451,7 +555,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
451 lstate = IPATH_IB_LINKDOWN_DISABLE; 555 lstate = IPATH_IB_LINKDOWN_DISABLE;
452 else 556 else
453 goto err; 557 goto err;
454 ipath_layer_set_linkstate(dev->dd, lstate); 558 ipath_set_linkstate(dev->dd, lstate);
455 if (flags & IPATH_LINKACTIVE) { 559 if (flags & IPATH_LINKACTIVE) {
456 event.event = IB_EVENT_PORT_ERR; 560 event.event = IB_EVENT_PORT_ERR;
457 ib_dispatch_event(&event); 561 ib_dispatch_event(&event);
@@ -460,7 +564,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
460 case IB_PORT_ARMED: 564 case IB_PORT_ARMED:
461 if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE))) 565 if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE)))
462 break; 566 break;
463 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM); 567 ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM);
464 if (flags & IPATH_LINKACTIVE) { 568 if (flags & IPATH_LINKACTIVE) {
465 event.event = IB_EVENT_PORT_ERR; 569 event.event = IB_EVENT_PORT_ERR;
466 ib_dispatch_event(&event); 570 ib_dispatch_event(&event);
@@ -469,7 +573,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
469 case IB_PORT_ACTIVE: 573 case IB_PORT_ACTIVE:
470 if (!(flags & IPATH_LINKARMED)) 574 if (!(flags & IPATH_LINKARMED))
471 break; 575 break;
472 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE); 576 ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE);
473 event.event = IB_EVENT_PORT_ACTIVE; 577 event.event = IB_EVENT_PORT_ACTIVE;
474 ib_dispatch_event(&event); 578 ib_dispatch_event(&event);
475 break; 579 break;
@@ -493,6 +597,152 @@ done:
493 return ret; 597 return ret;
494} 598}
495 599
600/**
 601 * rm_pkey - decrement the reference count for the given PKEY
602 * @dd: the infinipath device
603 * @key: the PKEY index
604 *
605 * Return true if this was the last reference and the hardware table entry
606 * needs to be changed.
607 */
608static int rm_pkey(struct ipath_devdata *dd, u16 key)
609{
610 int i;
611 int ret;
612
613 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
614 if (dd->ipath_pkeys[i] != key)
615 continue;
616 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
617 dd->ipath_pkeys[i] = 0;
618 ret = 1;
619 goto bail;
620 }
621 break;
622 }
623
624 ret = 0;
625
626bail:
627 return ret;
628}
629
630/**
631 * add_pkey - add the given PKEY to the hardware table
632 * @dd: the infinipath device
633 * @key: the PKEY
634 *
635 * Return an error code if unable to add the entry, zero if no change,
636 * or 1 if the hardware PKEY register needs to be updated.
637 */
638static int add_pkey(struct ipath_devdata *dd, u16 key)
639{
640 int i;
641 u16 lkey = key & 0x7FFF;
642 int any = 0;
643 int ret;
644
645 if (lkey == 0x7FFF) {
646 ret = 0;
647 goto bail;
648 }
649
650 /* Look for an empty slot or a matching PKEY. */
651 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
652 if (!dd->ipath_pkeys[i]) {
653 any++;
654 continue;
655 }
656 /* If it matches exactly, try to increment the ref count */
657 if (dd->ipath_pkeys[i] == key) {
658 if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
659 ret = 0;
660 goto bail;
661 }
662 /* Lost the race. Look for an empty slot below. */
663 atomic_dec(&dd->ipath_pkeyrefs[i]);
664 any++;
665 }
666 /*
667 * It makes no sense to have both the limited and unlimited
668 * PKEY set at the same time since the unlimited one will
669 * disable the limited one.
670 */
671 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
672 ret = -EEXIST;
673 goto bail;
674 }
675 }
676 if (!any) {
677 ret = -EBUSY;
678 goto bail;
679 }
680 for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
681 if (!dd->ipath_pkeys[i] &&
682 atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
683 /* for ipathstats, etc. */
684 ipath_stats.sps_pkeys[i] = lkey;
685 dd->ipath_pkeys[i] = key;
686 ret = 1;
687 goto bail;
688 }
689 }
690 ret = -EBUSY;
691
692bail:
693 return ret;
694}
695
696/**
697 * set_pkeys - set the PKEY table for port 0
698 * @dd: the infinipath device
699 * @pkeys: the PKEY table
700 */
701static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
702{
703 struct ipath_portdata *pd;
704 int i;
705 int changed = 0;
706
707 pd = dd->ipath_pd[0];
708
709 for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
710 u16 key = pkeys[i];
711 u16 okey = pd->port_pkeys[i];
712
713 if (key == okey)
714 continue;
715 /*
716 * The value of this PKEY table entry is changing.
717 * Remove the old entry in the hardware's array of PKEYs.
718 */
719 if (okey & 0x7FFF)
720 changed |= rm_pkey(dd, okey);
721 if (key & 0x7FFF) {
722 int ret = add_pkey(dd, key);
723
724 if (ret < 0)
725 key = 0;
726 else
727 changed |= ret;
728 }
729 pd->port_pkeys[i] = key;
730 }
731 if (changed) {
732 u64 pkey;
733
734 pkey = (u64) dd->ipath_pkeys[0] |
735 ((u64) dd->ipath_pkeys[1] << 16) |
736 ((u64) dd->ipath_pkeys[2] << 32) |
737 ((u64) dd->ipath_pkeys[3] << 48);
738 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
739 (unsigned long long) pkey);
740 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
741 pkey);
742 }
743 return 0;
744}
745
496static int recv_subn_set_pkeytable(struct ib_smp *smp, 746static int recv_subn_set_pkeytable(struct ib_smp *smp,
497 struct ib_device *ibdev) 747 struct ib_device *ibdev)
498{ 748{
@@ -500,13 +750,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
500 __be16 *p = (__be16 *) smp->data; 750 __be16 *p = (__be16 *) smp->data;
501 u16 *q = (u16 *) smp->data; 751 u16 *q = (u16 *) smp->data;
502 struct ipath_ibdev *dev = to_idev(ibdev); 752 struct ipath_ibdev *dev = to_idev(ibdev);
503 unsigned i, n = ipath_layer_get_npkeys(dev->dd); 753 unsigned i, n = ipath_get_npkeys(dev->dd);
504 754
505 for (i = 0; i < n; i++) 755 for (i = 0; i < n; i++)
506 q[i] = be16_to_cpu(p[i]); 756 q[i] = be16_to_cpu(p[i]);
507 757
508 if (startpx != 0 || 758 if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
509 ipath_layer_set_pkeys(dev->dd, q) != 0)
510 smp->status |= IB_SMP_INVALID_FIELD; 759 smp->status |= IB_SMP_INVALID_FIELD;
511 760
512 return recv_subn_get_pkeytable(smp, ibdev); 761 return recv_subn_get_pkeytable(smp, ibdev);
@@ -844,10 +1093,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
844 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1093 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
845 pmp->data; 1094 pmp->data;
846 struct ipath_ibdev *dev = to_idev(ibdev); 1095 struct ipath_ibdev *dev = to_idev(ibdev);
847 struct ipath_layer_counters cntrs; 1096 struct ipath_verbs_counters cntrs;
848 u8 port_select = p->port_select; 1097 u8 port_select = p->port_select;
849 1098
850 ipath_layer_get_counters(dev->dd, &cntrs); 1099 ipath_get_counters(dev->dd, &cntrs);
851 1100
852 /* Adjust counters for any resets done. */ 1101 /* Adjust counters for any resets done. */
853 cntrs.symbol_error_counter -= dev->z_symbol_error_counter; 1102 cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
@@ -944,8 +1193,8 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
944 u64 swords, rwords, spkts, rpkts, xwait; 1193 u64 swords, rwords, spkts, rpkts, xwait;
945 u8 port_select = p->port_select; 1194 u8 port_select = p->port_select;
946 1195
947 ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, 1196 ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
948 &rpkts, &xwait); 1197 &rpkts, &xwait);
949 1198
950 /* Adjust counters for any resets done. */ 1199 /* Adjust counters for any resets done. */
951 swords -= dev->z_port_xmit_data; 1200 swords -= dev->z_port_xmit_data;
@@ -978,13 +1227,13 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
978 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) 1227 struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
979 pmp->data; 1228 pmp->data;
980 struct ipath_ibdev *dev = to_idev(ibdev); 1229 struct ipath_ibdev *dev = to_idev(ibdev);
981 struct ipath_layer_counters cntrs; 1230 struct ipath_verbs_counters cntrs;
982 1231
983 /* 1232 /*
984 * Since the HW doesn't support clearing counters, we save the 1233 * Since the HW doesn't support clearing counters, we save the
985 * current count and subtract it from future responses. 1234 * current count and subtract it from future responses.
986 */ 1235 */
987 ipath_layer_get_counters(dev->dd, &cntrs); 1236 ipath_get_counters(dev->dd, &cntrs);
988 1237
989 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) 1238 if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
990 dev->z_symbol_error_counter = cntrs.symbol_error_counter; 1239 dev->z_symbol_error_counter = cntrs.symbol_error_counter;
@@ -1041,8 +1290,8 @@ static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
1041 struct ipath_ibdev *dev = to_idev(ibdev); 1290 struct ipath_ibdev *dev = to_idev(ibdev);
1042 u64 swords, rwords, spkts, rpkts, xwait; 1291 u64 swords, rwords, spkts, rpkts, xwait;
1043 1292
1044 ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, 1293 ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
1045 &rpkts, &xwait); 1294 &rpkts, &xwait);
1046 1295
1047 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) 1296 if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
1048 dev->z_port_xmit_data = swords; 1297 dev->z_port_xmit_data = swords;
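[Editor's note: the PKEY handling added to ipath_mad.c above keeps a small reference-counted partition-key table: rm_pkey() drops a reference and reports when a hardware slot frees up, add_pkey() either bumps an existing entry or claims an empty slot, and it rejects mixing the limited and full-membership forms of the same key. The standalone C sketch below is a minimal userspace model of that bookkeeping; the table size, return conventions and names are assumptions for illustration and it is not part of the patch.]

/*
 * Illustrative model of the reference-counted PKEY table used by
 * rm_pkey()/add_pkey().  Table size and names are assumptions.
 */
#include <stdio.h>

#define NPKEYS 4

static unsigned short pkeys[NPKEYS];
static int pkeyrefs[NPKEYS];

/* Drop one reference; return 1 if the slot became free. */
static int model_rm_pkey(unsigned short key)
{
	int i;

	for (i = 0; i < NPKEYS; i++) {
		if (pkeys[i] != key)
			continue;
		if (--pkeyrefs[i] == 0) {
			pkeys[i] = 0;
			return 1;
		}
		break;
	}
	return 0;
}

/* Add a key or take another reference; return 1 if the table changed. */
static int model_add_pkey(unsigned short key)
{
	unsigned short lkey = key & 0x7FFF;
	int i;

	for (i = 0; i < NPKEYS; i++) {
		if (pkeys[i] == key) {	/* exact match: just add a reference */
			pkeyrefs[i]++;
			return 0;
		}
		/* limited and full membership of the same key conflict */
		if (pkeys[i] && (pkeys[i] & 0x7FFF) == lkey)
			return -1;
	}
	for (i = 0; i < NPKEYS; i++) {
		if (!pkeys[i]) {
			pkeys[i] = key;
			pkeyrefs[i] = 1;
			return 1;	/* hardware register would need an update */
		}
	}
	return -1;		/* table full */
}

int main(void)
{
	printf("add 0x8001 -> %d\n", model_add_pkey(0x8001));
	printf("add 0x8001 -> %d\n", model_add_pkey(0x8001));
	printf("rm  0x8001 -> %d\n", model_rm_pkey(0x8001));
	printf("rm  0x8001 -> %d\n", model_rm_pkey(0x8001));
	return 0;
}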
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
new file mode 100644
index 000000000000..11b7378ff214
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/config.h>
34#include <linux/module.h>
35#include <linux/vmalloc.h>
36#include <linux/mm.h>
37#include <linux/errno.h>
38#include <asm/pgtable.h>
39
40#include "ipath_verbs.h"
41
42/**
43 * ipath_release_mmap_info - free mmap info structure
44 * @ref: a pointer to the kref within struct ipath_mmap_info
45 */
46void ipath_release_mmap_info(struct kref *ref)
47{
48 struct ipath_mmap_info *ip =
49 container_of(ref, struct ipath_mmap_info, ref);
50
51 vfree(ip->obj);
52 kfree(ip);
53}
54
55/*
56 * open and close keep track of how many times the CQ is mapped,
57 * to avoid releasing it.
58 */
59static void ipath_vma_open(struct vm_area_struct *vma)
60{
61 struct ipath_mmap_info *ip = vma->vm_private_data;
62
63 kref_get(&ip->ref);
64 ip->mmap_cnt++;
65}
66
67static void ipath_vma_close(struct vm_area_struct *vma)
68{
69 struct ipath_mmap_info *ip = vma->vm_private_data;
70
71 ip->mmap_cnt--;
72 kref_put(&ip->ref, ipath_release_mmap_info);
73}
74
75static struct vm_operations_struct ipath_vm_ops = {
76 .open = ipath_vma_open,
77 .close = ipath_vma_close,
78};
79
80/**
81 * ipath_mmap - create a new mmap region
82 * @context: the IB user context of the process making the mmap() call
83 * @vma: the VMA to be initialized
84 * Return zero if the mmap is OK. Otherwise, return an errno.
85 */
86int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
87{
88 struct ipath_ibdev *dev = to_idev(context->device);
89 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
90 unsigned long size = vma->vm_end - vma->vm_start;
91 struct ipath_mmap_info *ip, **pp;
92 int ret = -EINVAL;
93
94 /*
95 * Search the device's list of objects waiting for a mmap call.
96 * Normally, this list is very short since a call to create a
97 * CQ, QP, or SRQ is soon followed by a call to mmap().
98 */
99 spin_lock_irq(&dev->pending_lock);
100 for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
101 /* Only the creator is allowed to mmap the object */
102 if (context != ip->context || (void *) offset != ip->obj)
103 continue;
104 /* Don't allow a mmap larger than the object. */
105 if (size > ip->size)
106 break;
107
108 *pp = ip->next;
109 spin_unlock_irq(&dev->pending_lock);
110
111 ret = remap_vmalloc_range(vma, ip->obj, 0);
112 if (ret)
113 goto done;
114 vma->vm_ops = &ipath_vm_ops;
115 vma->vm_private_data = ip;
116 ipath_vma_open(vma);
117 goto done;
118 }
119 spin_unlock_irq(&dev->pending_lock);
120done:
121 return ret;
122}
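[Editor's note: the new ipath_mmap.c above ties the lifetime of a queue buffer to a kref: the object is created with one reference, every vma open takes one more, every vma close (and the final destroy) drops one, and the vmalloc'ed buffer is freed only on the last put. The sketch below is a plain userspace model of that pattern, with error handling omitted; it is illustrative only and not kernel code.]

/*
 * Userspace model of the kref-based lifetime of struct ipath_mmap_info.
 */
#include <stdio.h>
#include <stdlib.h>

struct mmap_info {
	int refcount;
	void *obj;
};

static void release(struct mmap_info *ip)
{
	free(ip->obj);		/* corresponds to vfree(ip->obj) */
	free(ip);		/* corresponds to kfree(ip) */
	printf("released\n");
}

static void get(struct mmap_info *ip)
{
	ip->refcount++;
}

static void put(struct mmap_info *ip)
{
	if (--ip->refcount == 0)
		release(ip);
}

int main(void)
{
	struct mmap_info *ip = calloc(1, sizeof(*ip));

	ip->obj = malloc(64);
	ip->refcount = 1;	/* creation reference (kref_init) */

	get(ip);		/* vma open */
	put(ip);		/* vma close */
	put(ip);		/* destroy: last reference frees the object */
	return 0;
}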
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 4ac31a5da330..b36f6fb3e37a 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -36,6 +36,18 @@
36 36
37#include "ipath_verbs.h" 37#include "ipath_verbs.h"
38 38
39/* Fast memory region */
40struct ipath_fmr {
41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct ipath_mregion mr; /* must be last */
44};
45
46static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
47{
48 return container_of(ibfmr, struct ipath_fmr, ibfmr);
49}
50
39/** 51/**
40 * ipath_get_dma_mr - get a DMA memory region 52 * ipath_get_dma_mr - get a DMA memory region
41 * @pd: protection domain for this memory region 53 * @pd: protection domain for this memory region
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 83e557be591e..224b0f40767f 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -35,7 +35,7 @@
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36 36
37#include "ipath_verbs.h" 37#include "ipath_verbs.h"
38#include "ipath_common.h" 38#include "ipath_kernel.h"
39 39
40#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) 40#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
41#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) 41#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
@@ -44,19 +44,6 @@
44#define find_next_offset(map, off) find_next_zero_bit((map)->page, \ 44#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
45 BITS_PER_PAGE, off) 45 BITS_PER_PAGE, off)
46 46
47#define TRANS_INVALID 0
48#define TRANS_ANY2RST 1
49#define TRANS_RST2INIT 2
50#define TRANS_INIT2INIT 3
51#define TRANS_INIT2RTR 4
52#define TRANS_RTR2RTS 5
53#define TRANS_RTS2RTS 6
54#define TRANS_SQERR2RTS 7
55#define TRANS_ANY2ERR 8
56#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */
57#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */
58#define TRANS_SQD2RTS 11 /* error if not drained */
59
60/* 47/*
61 * Convert the AETH credit code into the number of credits. 48 * Convert the AETH credit code into the number of credits.
62 */ 49 */
@@ -287,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
287 free_qpn(qpt, qp->ibqp.qp_num); 274 free_qpn(qpt, qp->ibqp.qp_num);
288 if (!atomic_dec_and_test(&qp->refcount) || 275 if (!atomic_dec_and_test(&qp->refcount) ||
289 !ipath_destroy_qp(&qp->ibqp)) 276 !ipath_destroy_qp(&qp->ibqp))
290 _VERBS_INFO("QP memory leak!\n"); 277 ipath_dbg(KERN_INFO "QP memory leak!\n");
291 qp = nqp; 278 qp = nqp;
292 } 279 }
293 } 280 }
@@ -355,8 +342,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
355 qp->s_last = 0; 342 qp->s_last = 0;
356 qp->s_ssn = 1; 343 qp->s_ssn = 1;
357 qp->s_lsn = 0; 344 qp->s_lsn = 0;
358 qp->r_rq.head = 0; 345 if (qp->r_rq.wq) {
359 qp->r_rq.tail = 0; 346 qp->r_rq.wq->head = 0;
347 qp->r_rq.wq->tail = 0;
348 }
360 qp->r_reuse_sge = 0; 349 qp->r_reuse_sge = 0;
361} 350}
362 351
@@ -373,8 +362,8 @@ void ipath_error_qp(struct ipath_qp *qp)
373 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 362 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
374 struct ib_wc wc; 363 struct ib_wc wc;
375 364
376 _VERBS_INFO("QP%d/%d in error state\n", 365 ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
377 qp->ibqp.qp_num, qp->remote_qpn); 366 qp->ibqp.qp_num, qp->remote_qpn);
378 367
379 spin_lock(&dev->pending_lock); 368 spin_lock(&dev->pending_lock);
 380 /* XXX What if it's already removed by the timeout code? */ 369 /* XXX What if it's already removed by the timeout code? */
@@ -410,15 +399,32 @@ void ipath_error_qp(struct ipath_qp *qp)
410 qp->s_hdrwords = 0; 399 qp->s_hdrwords = 0;
411 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; 400 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
412 401
413 wc.opcode = IB_WC_RECV; 402 if (qp->r_rq.wq) {
414 spin_lock(&qp->r_rq.lock); 403 struct ipath_rwq *wq;
415 while (qp->r_rq.tail != qp->r_rq.head) { 404 u32 head;
416 wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; 405 u32 tail;
417 if (++qp->r_rq.tail >= qp->r_rq.size) 406
418 qp->r_rq.tail = 0; 407 spin_lock(&qp->r_rq.lock);
419 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); 408
409 /* sanity check pointers before trusting them */
410 wq = qp->r_rq.wq;
411 head = wq->head;
412 if (head >= qp->r_rq.size)
413 head = 0;
414 tail = wq->tail;
415 if (tail >= qp->r_rq.size)
416 tail = 0;
417 wc.opcode = IB_WC_RECV;
418 while (tail != head) {
419 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
420 if (++tail >= qp->r_rq.size)
421 tail = 0;
422 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
423 }
424 wq->tail = tail;
425
426 spin_unlock(&qp->r_rq.lock);
420 } 427 }
421 spin_unlock(&qp->r_rq.lock);
422} 428}
423 429
424/** 430/**
@@ -426,11 +432,12 @@ void ipath_error_qp(struct ipath_qp *qp)
 426 * @ibqp: the queue pair whose attributes we're modifying 432 * @ibqp: the queue pair whose attributes we're modifying
427 * @attr: the new attributes 433 * @attr: the new attributes
428 * @attr_mask: the mask of attributes to modify 434 * @attr_mask: the mask of attributes to modify
435 * @udata: user data for ipathverbs.so
429 * 436 *
430 * Returns 0 on success, otherwise returns an errno. 437 * Returns 0 on success, otherwise returns an errno.
431 */ 438 */
432int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 439int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
433 int attr_mask) 440 int attr_mask, struct ib_udata *udata)
434{ 441{
435 struct ipath_ibdev *dev = to_idev(ibqp->device); 442 struct ipath_ibdev *dev = to_idev(ibqp->device);
436 struct ipath_qp *qp = to_iqp(ibqp); 443 struct ipath_qp *qp = to_iqp(ibqp);
@@ -448,19 +455,46 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
448 attr_mask)) 455 attr_mask))
449 goto inval; 456 goto inval;
450 457
451 if (attr_mask & IB_QP_AV) 458 if (attr_mask & IB_QP_AV) {
452 if (attr->ah_attr.dlid == 0 || 459 if (attr->ah_attr.dlid == 0 ||
453 attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE) 460 attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
454 goto inval; 461 goto inval;
455 462
463 if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
464 (attr->ah_attr.grh.sgid_index > 1))
465 goto inval;
466 }
467
456 if (attr_mask & IB_QP_PKEY_INDEX) 468 if (attr_mask & IB_QP_PKEY_INDEX)
457 if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) 469 if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
458 goto inval; 470 goto inval;
459 471
460 if (attr_mask & IB_QP_MIN_RNR_TIMER) 472 if (attr_mask & IB_QP_MIN_RNR_TIMER)
461 if (attr->min_rnr_timer > 31) 473 if (attr->min_rnr_timer > 31)
462 goto inval; 474 goto inval;
463 475
476 if (attr_mask & IB_QP_PORT)
477 if (attr->port_num == 0 ||
478 attr->port_num > ibqp->device->phys_port_cnt)
479 goto inval;
480
481 if (attr_mask & IB_QP_PATH_MTU)
482 if (attr->path_mtu > IB_MTU_4096)
483 goto inval;
484
485 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
486 if (attr->max_dest_rd_atomic > 1)
487 goto inval;
488
489 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
490 if (attr->max_rd_atomic > 1)
491 goto inval;
492
493 if (attr_mask & IB_QP_PATH_MIG_STATE)
494 if (attr->path_mig_state != IB_MIG_MIGRATED &&
495 attr->path_mig_state != IB_MIG_REARM)
496 goto inval;
497
464 switch (new_state) { 498 switch (new_state) {
465 case IB_QPS_RESET: 499 case IB_QPS_RESET:
466 ipath_reset_qp(qp); 500 ipath_reset_qp(qp);
@@ -511,6 +545,9 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
511 if (attr_mask & IB_QP_MIN_RNR_TIMER) 545 if (attr_mask & IB_QP_MIN_RNR_TIMER)
512 qp->r_min_rnr_timer = attr->min_rnr_timer; 546 qp->r_min_rnr_timer = attr->min_rnr_timer;
513 547
548 if (attr_mask & IB_QP_TIMEOUT)
549 qp->timeout = attr->timeout;
550
514 if (attr_mask & IB_QP_QKEY) 551 if (attr_mask & IB_QP_QKEY)
515 qp->qkey = attr->qkey; 552 qp->qkey = attr->qkey;
516 553
@@ -543,7 +580,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
543 attr->dest_qp_num = qp->remote_qpn; 580 attr->dest_qp_num = qp->remote_qpn;
544 attr->qp_access_flags = qp->qp_access_flags; 581 attr->qp_access_flags = qp->qp_access_flags;
545 attr->cap.max_send_wr = qp->s_size - 1; 582 attr->cap.max_send_wr = qp->s_size - 1;
546 attr->cap.max_recv_wr = qp->r_rq.size - 1; 583 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
547 attr->cap.max_send_sge = qp->s_max_sge; 584 attr->cap.max_send_sge = qp->s_max_sge;
548 attr->cap.max_recv_sge = qp->r_rq.max_sge; 585 attr->cap.max_recv_sge = qp->r_rq.max_sge;
549 attr->cap.max_inline_data = 0; 586 attr->cap.max_inline_data = 0;
@@ -557,7 +594,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
557 attr->max_dest_rd_atomic = 1; 594 attr->max_dest_rd_atomic = 1;
558 attr->min_rnr_timer = qp->r_min_rnr_timer; 595 attr->min_rnr_timer = qp->r_min_rnr_timer;
559 attr->port_num = 1; 596 attr->port_num = 1;
560 attr->timeout = 0; 597 attr->timeout = qp->timeout;
561 attr->retry_cnt = qp->s_retry_cnt; 598 attr->retry_cnt = qp->s_retry_cnt;
562 attr->rnr_retry = qp->s_rnr_retry; 599 attr->rnr_retry = qp->s_rnr_retry;
563 attr->alt_port_num = 0; 600 attr->alt_port_num = 0;
@@ -569,9 +606,10 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
569 init_attr->recv_cq = qp->ibqp.recv_cq; 606 init_attr->recv_cq = qp->ibqp.recv_cq;
570 init_attr->srq = qp->ibqp.srq; 607 init_attr->srq = qp->ibqp.srq;
571 init_attr->cap = attr->cap; 608 init_attr->cap = attr->cap;
572 init_attr->sq_sig_type = 609 if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
573 (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) 610 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
574 ? IB_SIGNAL_REQ_WR : 0; 611 else
612 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
575 init_attr->qp_type = qp->ibqp.qp_type; 613 init_attr->qp_type = qp->ibqp.qp_type;
576 init_attr->port_num = 1; 614 init_attr->port_num = 1;
577 return 0; 615 return 0;
@@ -596,13 +634,23 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
596 } else { 634 } else {
597 u32 min, max, x; 635 u32 min, max, x;
598 u32 credits; 636 u32 credits;
599 637 struct ipath_rwq *wq = qp->r_rq.wq;
638 u32 head;
639 u32 tail;
640
641 /* sanity check pointers before trusting them */
642 head = wq->head;
643 if (head >= qp->r_rq.size)
644 head = 0;
645 tail = wq->tail;
646 if (tail >= qp->r_rq.size)
647 tail = 0;
600 /* 648 /*
601 * Compute the number of credits available (RWQEs). 649 * Compute the number of credits available (RWQEs).
602 * XXX Not holding the r_rq.lock here so there is a small 650 * XXX Not holding the r_rq.lock here so there is a small
603 * chance that the pair of reads are not atomic. 651 * chance that the pair of reads are not atomic.
604 */ 652 */
605 credits = qp->r_rq.head - qp->r_rq.tail; 653 credits = head - tail;
606 if ((int)credits < 0) 654 if ((int)credits < 0)
607 credits += qp->r_rq.size; 655 credits += qp->r_rq.size;
608 /* 656 /*
@@ -679,27 +727,37 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
679 case IB_QPT_UD: 727 case IB_QPT_UD:
680 case IB_QPT_SMI: 728 case IB_QPT_SMI:
681 case IB_QPT_GSI: 729 case IB_QPT_GSI:
682 qp = kmalloc(sizeof(*qp), GFP_KERNEL); 730 sz = sizeof(*qp);
731 if (init_attr->srq) {
732 struct ipath_srq *srq = to_isrq(init_attr->srq);
733
734 sz += sizeof(*qp->r_sg_list) *
735 srq->rq.max_sge;
736 } else
737 sz += sizeof(*qp->r_sg_list) *
738 init_attr->cap.max_recv_sge;
739 qp = kmalloc(sz, GFP_KERNEL);
683 if (!qp) { 740 if (!qp) {
684 vfree(swq);
685 ret = ERR_PTR(-ENOMEM); 741 ret = ERR_PTR(-ENOMEM);
686 goto bail; 742 goto bail_swq;
687 } 743 }
688 if (init_attr->srq) { 744 if (init_attr->srq) {
745 sz = 0;
689 qp->r_rq.size = 0; 746 qp->r_rq.size = 0;
690 qp->r_rq.max_sge = 0; 747 qp->r_rq.max_sge = 0;
691 qp->r_rq.wq = NULL; 748 qp->r_rq.wq = NULL;
749 init_attr->cap.max_recv_wr = 0;
750 init_attr->cap.max_recv_sge = 0;
692 } else { 751 } else {
693 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; 752 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
694 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; 753 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
695 sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) + 754 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
696 sizeof(struct ipath_rwqe); 755 sizeof(struct ipath_rwqe);
697 qp->r_rq.wq = vmalloc(qp->r_rq.size * sz); 756 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
757 qp->r_rq.size * sz);
698 if (!qp->r_rq.wq) { 758 if (!qp->r_rq.wq) {
699 kfree(qp);
700 vfree(swq);
701 ret = ERR_PTR(-ENOMEM); 759 ret = ERR_PTR(-ENOMEM);
702 goto bail; 760 goto bail_qp;
703 } 761 }
704 } 762 }
705 763
@@ -719,24 +777,19 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
719 qp->s_wq = swq; 777 qp->s_wq = swq;
720 qp->s_size = init_attr->cap.max_send_wr + 1; 778 qp->s_size = init_attr->cap.max_send_wr + 1;
721 qp->s_max_sge = init_attr->cap.max_send_sge; 779 qp->s_max_sge = init_attr->cap.max_send_sge;
722 qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ? 780 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
723 1 << IPATH_S_SIGNAL_REQ_WR : 0; 781 qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
782 else
783 qp->s_flags = 0;
724 dev = to_idev(ibpd->device); 784 dev = to_idev(ibpd->device);
725 err = ipath_alloc_qpn(&dev->qp_table, qp, 785 err = ipath_alloc_qpn(&dev->qp_table, qp,
726 init_attr->qp_type); 786 init_attr->qp_type);
727 if (err) { 787 if (err) {
728 vfree(swq);
729 vfree(qp->r_rq.wq);
730 kfree(qp);
731 ret = ERR_PTR(err); 788 ret = ERR_PTR(err);
732 goto bail; 789 goto bail_rwq;
733 } 790 }
791 qp->ip = NULL;
734 ipath_reset_qp(qp); 792 ipath_reset_qp(qp);
735
736 /* Tell the core driver that the kernel SMA is present. */
737 if (init_attr->qp_type == IB_QPT_SMI)
738 ipath_layer_set_verbs_flags(dev->dd,
739 IPATH_VERBS_KERNEL_SMA);
740 break; 793 break;
741 794
742 default: 795 default:
@@ -747,8 +800,63 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
747 800
748 init_attr->cap.max_inline_data = 0; 801 init_attr->cap.max_inline_data = 0;
749 802
803 /*
804 * Return the address of the RWQ as the offset to mmap.
805 * See ipath_mmap() for details.
806 */
807 if (udata && udata->outlen >= sizeof(__u64)) {
808 struct ipath_mmap_info *ip;
809 __u64 offset = (__u64) qp->r_rq.wq;
810 int err;
811
812 err = ib_copy_to_udata(udata, &offset, sizeof(offset));
813 if (err) {
814 ret = ERR_PTR(err);
815 goto bail_rwq;
816 }
817
818 if (qp->r_rq.wq) {
819 /* Allocate info for ipath_mmap(). */
820 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
821 if (!ip) {
822 ret = ERR_PTR(-ENOMEM);
823 goto bail_rwq;
824 }
825 qp->ip = ip;
826 ip->context = ibpd->uobject->context;
827 ip->obj = qp->r_rq.wq;
828 kref_init(&ip->ref);
829 ip->mmap_cnt = 0;
830 ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
831 qp->r_rq.size * sz);
832 spin_lock_irq(&dev->pending_lock);
833 ip->next = dev->pending_mmaps;
834 dev->pending_mmaps = ip;
835 spin_unlock_irq(&dev->pending_lock);
836 }
837 }
838
839 spin_lock(&dev->n_qps_lock);
840 if (dev->n_qps_allocated == ib_ipath_max_qps) {
841 spin_unlock(&dev->n_qps_lock);
842 ret = ERR_PTR(-ENOMEM);
843 goto bail_ip;
844 }
845
846 dev->n_qps_allocated++;
847 spin_unlock(&dev->n_qps_lock);
848
750 ret = &qp->ibqp; 849 ret = &qp->ibqp;
850 goto bail;
751 851
852bail_ip:
853 kfree(qp->ip);
854bail_rwq:
855 vfree(qp->r_rq.wq);
856bail_qp:
857 kfree(qp);
858bail_swq:
859 vfree(swq);
752bail: 860bail:
753 return ret; 861 return ret;
754} 862}
@@ -768,15 +876,12 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
768 struct ipath_ibdev *dev = to_idev(ibqp->device); 876 struct ipath_ibdev *dev = to_idev(ibqp->device);
769 unsigned long flags; 877 unsigned long flags;
770 878
771 /* Tell the core driver that the kernel SMA is gone. */ 879 spin_lock_irqsave(&qp->s_lock, flags);
772 if (qp->ibqp.qp_type == IB_QPT_SMI)
773 ipath_layer_set_verbs_flags(dev->dd, 0);
774
775 spin_lock_irqsave(&qp->r_rq.lock, flags);
776 spin_lock(&qp->s_lock);
777 qp->state = IB_QPS_ERR; 880 qp->state = IB_QPS_ERR;
778 spin_unlock(&qp->s_lock); 881 spin_unlock_irqrestore(&qp->s_lock, flags);
779 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 882 spin_lock(&dev->n_qps_lock);
883 dev->n_qps_allocated--;
884 spin_unlock(&dev->n_qps_lock);
780 885
781 /* Stop the sending tasklet. */ 886 /* Stop the sending tasklet. */
782 tasklet_kill(&qp->s_task); 887 tasklet_kill(&qp->s_task);
@@ -797,8 +902,11 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
797 if (atomic_read(&qp->refcount) != 0) 902 if (atomic_read(&qp->refcount) != 0)
798 ipath_free_qp(&dev->qp_table, qp); 903 ipath_free_qp(&dev->qp_table, qp);
799 904
905 if (qp->ip)
906 kref_put(&qp->ip->ref, ipath_release_mmap_info);
907 else
908 vfree(qp->r_rq.wq);
800 vfree(qp->s_wq); 909 vfree(qp->s_wq);
801 vfree(qp->r_rq.wq);
802 kfree(qp); 910 kfree(qp);
803 return 0; 911 return 0;
804} 912}
@@ -850,8 +958,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
850 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 958 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
851 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); 959 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
852 960
853 _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n", 961 ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
854 qp->ibqp.qp_num, qp->remote_qpn, wc->status); 962 qp->ibqp.qp_num, qp->remote_qpn, wc->status);
855 963
856 spin_lock(&dev->pending_lock); 964 spin_lock(&dev->pending_lock);
 857 /* XXX What if it's already removed by the timeout code? */ 965 /* XXX What if it's already removed by the timeout code? */
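[Editor's note: the ipath_qp.c changes above move the receive queue head/tail indices into memory shared with user space (struct ipath_rwq), so the driver now clamps both indices to the ring size before trusting them, and computes available credits modulo the ring size (see ipath_compute_aeth and ipath_error_qp). The following small C sketch shows that arithmetic; it is illustrative only, not taken from the patch.]

/*
 * Sketch of the head/tail credit computation with sanity-clamping of
 * user-writable indices.
 */
#include <stdio.h>

static unsigned int rwq_credits(unsigned int head, unsigned int tail,
				unsigned int size)
{
	/* sanity check pointers before trusting them */
	if (head >= size)
		head = 0;
	if (tail >= size)
		tail = 0;
	/* entries posted and not yet consumed, modulo the ring size */
	return (head >= tail) ? head - tail : size - tail + head;
}

int main(void)
{
	printf("%u\n", rwq_credits(5, 2, 8));	/* 3 */
	printf("%u\n", rwq_credits(1, 6, 8));	/* 3 (wrapped) */
	printf("%u\n", rwq_credits(42, 6, 8));	/* head clamped to 0 -> 2 */
	return 0;
}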
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 774d1615ce2f..a08654042c03 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#include "ipath_verbs.h" 34#include "ipath_verbs.h"
35#include "ipath_common.h" 35#include "ipath_kernel.h"
36 36
37/* cut down ridiculously long IB macro names */ 37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_RC_##x 38#define OP(x) IB_OPCODE_RC_##x
@@ -540,7 +540,7 @@ static void send_rc_ack(struct ipath_qp *qp)
540 lrh0 = IPATH_LRH_GRH; 540 lrh0 = IPATH_LRH_GRH;
541 } 541 }
542 /* read pkey_index w/o lock (its atomic) */ 542 /* read pkey_index w/o lock (its atomic) */
543 bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); 543 bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index);
544 if (qp->r_nak_state) 544 if (qp->r_nak_state)
545 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 545 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
546 (qp->r_nak_state << 546 (qp->r_nak_state <<
@@ -557,7 +557,7 @@ static void send_rc_ack(struct ipath_qp *qp)
557 hdr.lrh[0] = cpu_to_be16(lrh0); 557 hdr.lrh[0] = cpu_to_be16(lrh0);
558 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 558 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
559 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); 559 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
560 hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); 560 hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
561 ohdr->bth[0] = cpu_to_be32(bth0); 561 ohdr->bth[0] = cpu_to_be32(bth0);
562 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 562 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
563 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); 563 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
@@ -1323,8 +1323,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1323 * the eager header buffer size to 56 bytes so the last 4 1323 * the eager header buffer size to 56 bytes so the last 4
1324 * bytes of the BTH header (PSN) is in the data buffer. 1324 * bytes of the BTH header (PSN) is in the data buffer.
1325 */ 1325 */
1326 header_in_data = 1326 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
1327 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
1328 if (header_in_data) { 1327 if (header_in_data) {
1329 psn = be32_to_cpu(((__be32 *) data)[0]); 1328 psn = be32_to_cpu(((__be32 *) data)[0]);
1330 data += sizeof(__be32); 1329 data += sizeof(__be32);
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 89df8f5ea998..6e23b3d632b8 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -36,8 +36,7 @@
36 36
37/* 37/*
38 * This file should only be included by kernel source, and by the diags. It 38 * This file should only be included by kernel source, and by the diags. It
39 * defines the registers, and their contents, for the InfiniPath HT-400 39 * defines the registers, and their contents, for InfiniPath chips.
40 * chip.
41 */ 40 */
42 41
43/* 42/*
@@ -283,10 +282,12 @@
283#define INFINIPATH_XGXS_RESET 0x7ULL 282#define INFINIPATH_XGXS_RESET 0x7ULL
284#define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL 283#define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL
285#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4 284#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
285#define INFINIPATH_XGXS_RX_POL_SHIFT 19
286#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
286 287
287#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ 288#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
288 289
289/* TID entries (memory), HT400-only */ 290/* TID entries (memory), HT-only */
290#define INFINIPATH_RT_VALID 0x8000000000000000ULL 291#define INFINIPATH_RT_VALID 0x8000000000000000ULL
291#define INFINIPATH_RT_ADDR_SHIFT 0 292#define INFINIPATH_RT_ADDR_SHIFT 0
292#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF 293#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 772bc59fb85c..5c1da2d25e03 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#include "ipath_verbs.h" 34#include "ipath_verbs.h"
35#include "ipath_common.h" 35#include "ipath_kernel.h"
36 36
37/* 37/*
38 * Convert the AETH RNR timeout code into the number of milliseconds. 38 * Convert the AETH RNR timeout code into the number of milliseconds.
@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
106 spin_unlock_irqrestore(&dev->pending_lock, flags); 106 spin_unlock_irqrestore(&dev->pending_lock, flags);
107} 107}
108 108
109static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
110{
111 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
112 int user = to_ipd(qp->ibqp.pd)->user;
113 int i, j, ret;
114 struct ib_wc wc;
115
116 qp->r_len = 0;
117 for (i = j = 0; i < wqe->num_sge; i++) {
118 if (wqe->sg_list[i].length == 0)
119 continue;
120 /* Check LKEY */
121 if ((user && wqe->sg_list[i].lkey == 0) ||
122 !ipath_lkey_ok(&dev->lk_table,
123 &qp->r_sg_list[j], &wqe->sg_list[i],
124 IB_ACCESS_LOCAL_WRITE))
125 goto bad_lkey;
126 qp->r_len += wqe->sg_list[i].length;
127 j++;
128 }
129 qp->r_sge.sge = qp->r_sg_list[0];
130 qp->r_sge.sg_list = qp->r_sg_list + 1;
131 qp->r_sge.num_sge = j;
132 ret = 1;
133 goto bail;
134
135bad_lkey:
136 wc.wr_id = wqe->wr_id;
137 wc.status = IB_WC_LOC_PROT_ERR;
138 wc.opcode = IB_WC_RECV;
139 wc.vendor_err = 0;
140 wc.byte_len = 0;
141 wc.imm_data = 0;
142 wc.qp_num = qp->ibqp.qp_num;
143 wc.src_qp = 0;
144 wc.wc_flags = 0;
145 wc.pkey_index = 0;
146 wc.slid = 0;
147 wc.sl = 0;
148 wc.dlid_path_bits = 0;
149 wc.port_num = 0;
150 /* Signal solicited completion event. */
151 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
152 ret = 0;
153bail:
154 return ret;
155}
156
109/** 157/**
110 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE 158 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
111 * @qp: the QP 159 * @qp: the QP
@@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
119{ 167{
120 unsigned long flags; 168 unsigned long flags;
121 struct ipath_rq *rq; 169 struct ipath_rq *rq;
170 struct ipath_rwq *wq;
122 struct ipath_srq *srq; 171 struct ipath_srq *srq;
123 struct ipath_rwqe *wqe; 172 struct ipath_rwqe *wqe;
124 int ret = 1; 173 void (*handler)(struct ib_event *, void *);
174 u32 tail;
175 int ret;
125 176
126 if (!qp->ibqp.srq) { 177 if (qp->ibqp.srq) {
178 srq = to_isrq(qp->ibqp.srq);
179 handler = srq->ibsrq.event_handler;
180 rq = &srq->rq;
181 } else {
182 srq = NULL;
183 handler = NULL;
127 rq = &qp->r_rq; 184 rq = &qp->r_rq;
128 spin_lock_irqsave(&rq->lock, flags);
129
130 if (unlikely(rq->tail == rq->head)) {
131 ret = 0;
132 goto done;
133 }
134 wqe = get_rwqe_ptr(rq, rq->tail);
135 qp->r_wr_id = wqe->wr_id;
136 if (!wr_id_only) {
137 qp->r_sge.sge = wqe->sg_list[0];
138 qp->r_sge.sg_list = wqe->sg_list + 1;
139 qp->r_sge.num_sge = wqe->num_sge;
140 qp->r_len = wqe->length;
141 }
142 if (++rq->tail >= rq->size)
143 rq->tail = 0;
144 goto done;
145 } 185 }
146 186
147 srq = to_isrq(qp->ibqp.srq);
148 rq = &srq->rq;
149 spin_lock_irqsave(&rq->lock, flags); 187 spin_lock_irqsave(&rq->lock, flags);
150 188 wq = rq->wq;
151 if (unlikely(rq->tail == rq->head)) { 189 tail = wq->tail;
152 ret = 0; 190 /* Validate tail before using it since it is user writable. */
153 goto done; 191 if (tail >= rq->size)
154 } 192 tail = 0;
155 wqe = get_rwqe_ptr(rq, rq->tail); 193 do {
194 if (unlikely(tail == wq->head)) {
195 spin_unlock_irqrestore(&rq->lock, flags);
196 ret = 0;
197 goto bail;
198 }
199 wqe = get_rwqe_ptr(rq, tail);
200 if (++tail >= rq->size)
201 tail = 0;
202 } while (!wr_id_only && !init_sge(qp, wqe));
156 qp->r_wr_id = wqe->wr_id; 203 qp->r_wr_id = wqe->wr_id;
157 if (!wr_id_only) { 204 wq->tail = tail;
158 qp->r_sge.sge = wqe->sg_list[0]; 205
159 qp->r_sge.sg_list = wqe->sg_list + 1; 206 ret = 1;
160 qp->r_sge.num_sge = wqe->num_sge; 207 if (handler) {
161 qp->r_len = wqe->length;
162 }
163 if (++rq->tail >= rq->size)
164 rq->tail = 0;
165 if (srq->ibsrq.event_handler) {
166 struct ib_event ev;
167 u32 n; 208 u32 n;
168 209
169 if (rq->head < rq->tail) 210 /*
170 n = rq->size + rq->head - rq->tail; 211 * validate head pointer value and compute
212 * the number of remaining WQEs.
213 */
214 n = wq->head;
215 if (n >= rq->size)
216 n = 0;
217 if (n < tail)
218 n += rq->size - tail;
171 else 219 else
172 n = rq->head - rq->tail; 220 n -= tail;
173 if (n < srq->limit) { 221 if (n < srq->limit) {
222 struct ib_event ev;
223
174 srq->limit = 0; 224 srq->limit = 0;
175 spin_unlock_irqrestore(&rq->lock, flags); 225 spin_unlock_irqrestore(&rq->lock, flags);
176 ev.device = qp->ibqp.device; 226 ev.device = qp->ibqp.device;
177 ev.element.srq = qp->ibqp.srq; 227 ev.element.srq = qp->ibqp.srq;
178 ev.event = IB_EVENT_SRQ_LIMIT_REACHED; 228 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
179 srq->ibsrq.event_handler(&ev, 229 handler(&ev, srq->ibsrq.srq_context);
180 srq->ibsrq.srq_context);
181 goto bail; 230 goto bail;
182 } 231 }
183 } 232 }
184
185done:
186 spin_unlock_irqrestore(&rq->lock, flags); 233 spin_unlock_irqrestore(&rq->lock, flags);
234
187bail: 235bail:
188 return ret; 236 return ret;
189} 237}
@@ -422,6 +470,15 @@ done:
422 wake_up(&qp->wait); 470 wake_up(&qp->wait);
423} 471}
424 472
473static int want_buffer(struct ipath_devdata *dd)
474{
475 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
476 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
477 dd->ipath_sendctrl);
478
479 return 0;
480}
481
425/** 482/**
426 * ipath_no_bufs_available - tell the layer driver we need buffers 483 * ipath_no_bufs_available - tell the layer driver we need buffers
427 * @qp: the QP that caused the problem 484 * @qp: the QP that caused the problem
@@ -438,7 +495,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
438 list_add_tail(&qp->piowait, &dev->piowait); 495 list_add_tail(&qp->piowait, &dev->piowait);
439 spin_unlock_irqrestore(&dev->pending_lock, flags); 496 spin_unlock_irqrestore(&dev->pending_lock, flags);
440 /* 497 /*
441 * Note that as soon as ipath_layer_want_buffer() is called and 498 * Note that as soon as want_buffer() is called and
442 * possibly before it returns, ipath_ib_piobufavail() 499 * possibly before it returns, ipath_ib_piobufavail()
443 * could be called. If we are still in the tasklet function, 500 * could be called. If we are still in the tasklet function,
444 * tasklet_hi_schedule() will not call us until the next time 501 * tasklet_hi_schedule() will not call us until the next time
@@ -448,7 +505,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
448 */ 505 */
449 clear_bit(IPATH_S_BUSY, &qp->s_flags); 506 clear_bit(IPATH_S_BUSY, &qp->s_flags);
450 tasklet_unlock(&qp->s_task); 507 tasklet_unlock(&qp->s_task);
451 ipath_layer_want_buffer(dev->dd); 508 want_buffer(dev->dd);
452 dev->n_piowait++; 509 dev->n_piowait++;
453} 510}
454 511
@@ -563,7 +620,7 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
563 hdr->hop_limit = grh->hop_limit; 620 hdr->hop_limit = grh->hop_limit;
564 /* The SGID is 32-bit aligned. */ 621 /* The SGID is 32-bit aligned. */
565 hdr->sgid.global.subnet_prefix = dev->gid_prefix; 622 hdr->sgid.global.subnet_prefix = dev->gid_prefix;
566 hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd); 623 hdr->sgid.global.interface_id = dev->dd->ipath_guid;
567 hdr->dgid = grh->dgid; 624 hdr->dgid = grh->dgid;
568 625
569 /* GRH header size in 32-bit words. */ 626 /* GRH header size in 32-bit words. */
@@ -595,8 +652,7 @@ void ipath_do_ruc_send(unsigned long data)
595 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) 652 if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
596 goto bail; 653 goto bail;
597 654
598 if (unlikely(qp->remote_ah_attr.dlid == 655 if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
599 ipath_layer_get_lid(dev->dd))) {
600 ipath_ruc_loopback(qp); 656 ipath_ruc_loopback(qp);
601 goto clear; 657 goto clear;
602 } 658 }
@@ -663,8 +719,8 @@ again:
663 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); 719 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
664 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + 720 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
665 SIZE_OF_CRC); 721 SIZE_OF_CRC);
666 qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); 722 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
667 bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); 723 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
668 bth0 |= extra_bytes << 20; 724 bth0 |= extra_bytes << 20;
669 ohdr->bth[0] = cpu_to_be32(bth0); 725 ohdr->bth[0] = cpu_to_be32(bth0);
670 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 726 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
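[Editor's note: the reworked ipath_get_rwqe() above validates the user-writable tail index before using it and keeps consuming entries until it finds one whose SGE list passes the LKEY checks in init_sge(), rather than stopping on a bad entry. The C sketch below models that consumer loop; validate_entry() stands in for init_sge(), the names are assumptions, and in the real code a rejected WQE is also flushed with an error completion.]

/*
 * Userspace model of the ipath_get_rwqe() consumption loop.
 */
#include <stdio.h>

struct entry { int valid; int id; };

static int validate_entry(const struct entry *e)
{
	return e->valid;	/* real code checks LKEYs and flushes bad WQEs */
}

static int get_next(const struct entry *ring, unsigned int size,
		    unsigned int head, unsigned int *tailp, int *idp)
{
	unsigned int tail = *tailp;

	if (tail >= size)	/* don't trust the user-writable index */
		tail = 0;
	do {
		const struct entry *e;

		if (tail == head)
			return 0;	/* queue empty */
		e = &ring[tail];
		if (++tail >= size)
			tail = 0;
		if (validate_entry(e)) {
			*idp = e->id;
			*tailp = tail;
			return 1;
		}
	} while (1);
}

int main(void)
{
	struct entry ring[4] = { {0, 0}, {0, 1}, {1, 2}, {0, 3} };
	unsigned int tail = 0;
	int id;

	if (get_next(ring, 4, 3, &tail, &id))
		printf("got wqe %d, tail now %u\n", id, tail); /* 2, 3 */
	return 0;
}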
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index f760434660bd..941e866d9517 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -48,66 +48,39 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
48 struct ib_recv_wr **bad_wr) 48 struct ib_recv_wr **bad_wr)
49{ 49{
50 struct ipath_srq *srq = to_isrq(ibsrq); 50 struct ipath_srq *srq = to_isrq(ibsrq);
51 struct ipath_ibdev *dev = to_idev(ibsrq->device); 51 struct ipath_rwq *wq;
52 unsigned long flags; 52 unsigned long flags;
53 int ret; 53 int ret;
54 54
55 for (; wr; wr = wr->next) { 55 for (; wr; wr = wr->next) {
56 struct ipath_rwqe *wqe; 56 struct ipath_rwqe *wqe;
57 u32 next; 57 u32 next;
58 int i, j; 58 int i;
59 59
60 if (wr->num_sge > srq->rq.max_sge) { 60 if ((unsigned) wr->num_sge > srq->rq.max_sge) {
61 *bad_wr = wr; 61 *bad_wr = wr;
62 ret = -ENOMEM; 62 ret = -ENOMEM;
63 goto bail; 63 goto bail;
64 } 64 }
65 65
66 spin_lock_irqsave(&srq->rq.lock, flags); 66 spin_lock_irqsave(&srq->rq.lock, flags);
67 next = srq->rq.head + 1; 67 wq = srq->rq.wq;
68 next = wq->head + 1;
68 if (next >= srq->rq.size) 69 if (next >= srq->rq.size)
69 next = 0; 70 next = 0;
70 if (next == srq->rq.tail) { 71 if (next == wq->tail) {
71 spin_unlock_irqrestore(&srq->rq.lock, flags); 72 spin_unlock_irqrestore(&srq->rq.lock, flags);
72 *bad_wr = wr; 73 *bad_wr = wr;
73 ret = -ENOMEM; 74 ret = -ENOMEM;
74 goto bail; 75 goto bail;
75 } 76 }
76 77
77 wqe = get_rwqe_ptr(&srq->rq, srq->rq.head); 78 wqe = get_rwqe_ptr(&srq->rq, wq->head);
78 wqe->wr_id = wr->wr_id; 79 wqe->wr_id = wr->wr_id;
79 wqe->sg_list[0].mr = NULL; 80 wqe->num_sge = wr->num_sge;
80 wqe->sg_list[0].vaddr = NULL; 81 for (i = 0; i < wr->num_sge; i++)
81 wqe->sg_list[0].length = 0; 82 wqe->sg_list[i] = wr->sg_list[i];
82 wqe->sg_list[0].sge_length = 0; 83 wq->head = next;
83 wqe->length = 0;
84 for (i = 0, j = 0; i < wr->num_sge; i++) {
85 /* Check LKEY */
86 if (to_ipd(srq->ibsrq.pd)->user &&
87 wr->sg_list[i].lkey == 0) {
88 spin_unlock_irqrestore(&srq->rq.lock,
89 flags);
90 *bad_wr = wr;
91 ret = -EINVAL;
92 goto bail;
93 }
94 if (wr->sg_list[i].length == 0)
95 continue;
96 if (!ipath_lkey_ok(&dev->lk_table,
97 &wqe->sg_list[j],
98 &wr->sg_list[i],
99 IB_ACCESS_LOCAL_WRITE)) {
100 spin_unlock_irqrestore(&srq->rq.lock,
101 flags);
102 *bad_wr = wr;
103 ret = -EINVAL;
104 goto bail;
105 }
106 wqe->length += wr->sg_list[i].length;
107 j++;
108 }
109 wqe->num_sge = j;
110 srq->rq.head = next;
111 spin_unlock_irqrestore(&srq->rq.lock, flags); 84 spin_unlock_irqrestore(&srq->rq.lock, flags);
112 } 85 }
113 ret = 0; 86 ret = 0;
@@ -133,53 +106,95 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
133 106
134 if (dev->n_srqs_allocated == ib_ipath_max_srqs) { 107 if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
135 ret = ERR_PTR(-ENOMEM); 108 ret = ERR_PTR(-ENOMEM);
136 goto bail; 109 goto done;
137 } 110 }
138 111
139 if (srq_init_attr->attr.max_wr == 0) { 112 if (srq_init_attr->attr.max_wr == 0) {
140 ret = ERR_PTR(-EINVAL); 113 ret = ERR_PTR(-EINVAL);
141 goto bail; 114 goto done;
142 } 115 }
143 116
144 if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) || 117 if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
145 (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) { 118 (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
146 ret = ERR_PTR(-EINVAL); 119 ret = ERR_PTR(-EINVAL);
147 goto bail; 120 goto done;
148 } 121 }
149 122
150 srq = kmalloc(sizeof(*srq), GFP_KERNEL); 123 srq = kmalloc(sizeof(*srq), GFP_KERNEL);
151 if (!srq) { 124 if (!srq) {
152 ret = ERR_PTR(-ENOMEM); 125 ret = ERR_PTR(-ENOMEM);
153 goto bail; 126 goto done;
154 } 127 }
155 128
156 /* 129 /*
157 * Need to use vmalloc() if we want to support large #s of entries. 130 * Need to use vmalloc() if we want to support large #s of entries.
158 */ 131 */
159 srq->rq.size = srq_init_attr->attr.max_wr + 1; 132 srq->rq.size = srq_init_attr->attr.max_wr + 1;
160 sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge + 133 srq->rq.max_sge = srq_init_attr->attr.max_sge;
134 sz = sizeof(struct ib_sge) * srq->rq.max_sge +
161 sizeof(struct ipath_rwqe); 135 sizeof(struct ipath_rwqe);
162 srq->rq.wq = vmalloc(srq->rq.size * sz); 136 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
163 if (!srq->rq.wq) { 137 if (!srq->rq.wq) {
164 kfree(srq);
165 ret = ERR_PTR(-ENOMEM); 138 ret = ERR_PTR(-ENOMEM);
166 goto bail; 139 goto bail_srq;
167 } 140 }
168 141
169 /* 142 /*
143 * Return the address of the RWQ as the offset to mmap.
144 * See ipath_mmap() for details.
145 */
146 if (udata && udata->outlen >= sizeof(__u64)) {
147 struct ipath_mmap_info *ip;
148 __u64 offset = (__u64) srq->rq.wq;
149 int err;
150
151 err = ib_copy_to_udata(udata, &offset, sizeof(offset));
152 if (err) {
153 ret = ERR_PTR(err);
154 goto bail_wq;
155 }
156
157 /* Allocate info for ipath_mmap(). */
158 ip = kmalloc(sizeof(*ip), GFP_KERNEL);
159 if (!ip) {
160 ret = ERR_PTR(-ENOMEM);
161 goto bail_wq;
162 }
163 srq->ip = ip;
164 ip->context = ibpd->uobject->context;
165 ip->obj = srq->rq.wq;
166 kref_init(&ip->ref);
167 ip->mmap_cnt = 0;
168 ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
169 srq->rq.size * sz);
170 spin_lock_irq(&dev->pending_lock);
171 ip->next = dev->pending_mmaps;
172 dev->pending_mmaps = ip;
173 spin_unlock_irq(&dev->pending_lock);
174 } else
175 srq->ip = NULL;
176
177 /*
170 * ib_create_srq() will initialize srq->ibsrq. 178 * ib_create_srq() will initialize srq->ibsrq.
171 */ 179 */
172 spin_lock_init(&srq->rq.lock); 180 spin_lock_init(&srq->rq.lock);
173 srq->rq.head = 0; 181 srq->rq.wq->head = 0;
174 srq->rq.tail = 0; 182 srq->rq.wq->tail = 0;
175 srq->rq.max_sge = srq_init_attr->attr.max_sge; 183 srq->rq.max_sge = srq_init_attr->attr.max_sge;
176 srq->limit = srq_init_attr->attr.srq_limit; 184 srq->limit = srq_init_attr->attr.srq_limit;
177 185
186 dev->n_srqs_allocated++;
187
178 ret = &srq->ibsrq; 188 ret = &srq->ibsrq;
189 goto done;
179 190
180 dev->n_srqs_allocated++; 191bail_wq:
192 vfree(srq->rq.wq);
181 193
182bail: 194bail_srq:
195 kfree(srq);
196
197done:
183 return ret; 198 return ret;
184} 199}
185 200
@@ -188,83 +203,130 @@ bail:
188 * @ibsrq: the SRQ to modify 203 * @ibsrq: the SRQ to modify
189 * @attr: the new attributes of the SRQ 204 * @attr: the new attributes of the SRQ
190 * @attr_mask: indicates which attributes to modify 205 * @attr_mask: indicates which attributes to modify
206 * @udata: user data for ipathverbs.so
191 */ 207 */
192int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 208int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
193 enum ib_srq_attr_mask attr_mask) 209 enum ib_srq_attr_mask attr_mask,
210 struct ib_udata *udata)
194{ 211{
195 struct ipath_srq *srq = to_isrq(ibsrq); 212 struct ipath_srq *srq = to_isrq(ibsrq);
196 unsigned long flags; 213 int ret = 0;
197 int ret;
198 214
199 if (attr_mask & IB_SRQ_MAX_WR) 215 if (attr_mask & IB_SRQ_MAX_WR) {
200 if ((attr->max_wr > ib_ipath_max_srq_wrs) || 216 struct ipath_rwq *owq;
201 (attr->max_sge > srq->rq.max_sge)) { 217 struct ipath_rwq *wq;
202 ret = -EINVAL; 218 struct ipath_rwqe *p;
203 goto bail; 219 u32 sz, size, n, head, tail;
204 }
205 220
206 if (attr_mask & IB_SRQ_LIMIT) 221 /* Check that the requested sizes are below the limits. */
207 if (attr->srq_limit >= srq->rq.size) { 222 if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
223 ((attr_mask & IB_SRQ_LIMIT) ?
224 attr->srq_limit : srq->limit) > attr->max_wr) {
208 ret = -EINVAL; 225 ret = -EINVAL;
209 goto bail; 226 goto bail;
210 } 227 }
211 228
212 if (attr_mask & IB_SRQ_MAX_WR) {
213 struct ipath_rwqe *wq, *p;
214 u32 sz, size, n;
215
216 sz = sizeof(struct ipath_rwqe) + 229 sz = sizeof(struct ipath_rwqe) +
217 attr->max_sge * sizeof(struct ipath_sge); 230 srq->rq.max_sge * sizeof(struct ib_sge);
218 size = attr->max_wr + 1; 231 size = attr->max_wr + 1;
219 wq = vmalloc(size * sz); 232 wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
220 if (!wq) { 233 if (!wq) {
221 ret = -ENOMEM; 234 ret = -ENOMEM;
222 goto bail; 235 goto bail;
223 } 236 }
224 237
225 spin_lock_irqsave(&srq->rq.lock, flags); 238 /*
226 if (srq->rq.head < srq->rq.tail) 239 * Return the address of the RWQ as the offset to mmap.
227 n = srq->rq.size + srq->rq.head - srq->rq.tail; 240 * See ipath_mmap() for details.
241 */
242 if (udata && udata->inlen >= sizeof(__u64)) {
243 __u64 offset_addr;
244 __u64 offset = (__u64) wq;
245
246 ret = ib_copy_from_udata(&offset_addr, udata,
247 sizeof(offset_addr));
248 if (ret) {
249 vfree(wq);
250 goto bail;
251 }
252 udata->outbuf = (void __user *) offset_addr;
253 ret = ib_copy_to_udata(udata, &offset,
254 sizeof(offset));
255 if (ret) {
256 vfree(wq);
257 goto bail;
258 }
259 }
260
261 spin_lock_irq(&srq->rq.lock);
262 /*
263 * validate head pointer value and compute
264 * the number of remaining WQEs.
265 */
266 owq = srq->rq.wq;
267 head = owq->head;
268 if (head >= srq->rq.size)
269 head = 0;
270 tail = owq->tail;
271 if (tail >= srq->rq.size)
272 tail = 0;
273 n = head;
274 if (n < tail)
275 n += srq->rq.size - tail;
228 else 276 else
229 n = srq->rq.head - srq->rq.tail; 277 n -= tail;
230 if (size <= n || size <= srq->limit) { 278 if (size <= n) {
231 spin_unlock_irqrestore(&srq->rq.lock, flags); 279 spin_unlock_irq(&srq->rq.lock);
232 vfree(wq); 280 vfree(wq);
233 ret = -EINVAL; 281 ret = -EINVAL;
234 goto bail; 282 goto bail;
235 } 283 }
236 n = 0; 284 n = 0;
237 p = wq; 285 p = wq->wq;
238 while (srq->rq.tail != srq->rq.head) { 286 while (tail != head) {
239 struct ipath_rwqe *wqe; 287 struct ipath_rwqe *wqe;
240 int i; 288 int i;
241 289
242 wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail); 290 wqe = get_rwqe_ptr(&srq->rq, tail);
243 p->wr_id = wqe->wr_id; 291 p->wr_id = wqe->wr_id;
244 p->length = wqe->length;
245 p->num_sge = wqe->num_sge; 292 p->num_sge = wqe->num_sge;
246 for (i = 0; i < wqe->num_sge; i++) 293 for (i = 0; i < wqe->num_sge; i++)
247 p->sg_list[i] = wqe->sg_list[i]; 294 p->sg_list[i] = wqe->sg_list[i];
248 n++; 295 n++;
249 p = (struct ipath_rwqe *)((char *) p + sz); 296 p = (struct ipath_rwqe *)((char *) p + sz);
250 if (++srq->rq.tail >= srq->rq.size) 297 if (++tail >= srq->rq.size)
251 srq->rq.tail = 0; 298 tail = 0;
252 } 299 }
253 vfree(srq->rq.wq);
254 srq->rq.wq = wq; 300 srq->rq.wq = wq;
255 srq->rq.size = size; 301 srq->rq.size = size;
256 srq->rq.head = n; 302 wq->head = n;
257 srq->rq.tail = 0; 303 wq->tail = 0;
258 srq->rq.max_sge = attr->max_sge; 304 if (attr_mask & IB_SRQ_LIMIT)
259 spin_unlock_irqrestore(&srq->rq.lock, flags); 305 srq->limit = attr->srq_limit;
260 } 306 spin_unlock_irq(&srq->rq.lock);
261 307
262 if (attr_mask & IB_SRQ_LIMIT) { 308 vfree(owq);
263 spin_lock_irqsave(&srq->rq.lock, flags); 309
264 srq->limit = attr->srq_limit; 310 if (srq->ip) {
265 spin_unlock_irqrestore(&srq->rq.lock, flags); 311 struct ipath_mmap_info *ip = srq->ip;
312 struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
313
314 ip->obj = wq;
315 ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
316 size * sz);
317 spin_lock_irq(&dev->pending_lock);
318 ip->next = dev->pending_mmaps;
319 dev->pending_mmaps = ip;
320 spin_unlock_irq(&dev->pending_lock);
321 }
322 } else if (attr_mask & IB_SRQ_LIMIT) {
323 spin_lock_irq(&srq->rq.lock);
324 if (attr->srq_limit >= srq->rq.size)
325 ret = -EINVAL;
326 else
327 srq->limit = attr->srq_limit;
328 spin_unlock_irq(&srq->rq.lock);
266 } 329 }
267 ret = 0;
268 330
269bail: 331bail:
270 return ret; 332 return ret;
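[Editor's note: the SRQ resize path in ipath_modify_srq() above allocates a new ring, then copies the live entries between tail and head into it starting at slot 0, so after the swap the new head equals the number of outstanding entries and the new tail is 0. The sketch below models that copy with fixed-size int entries instead of variable-size WQEs; it is illustrative only and the names are assumptions.]

/*
 * Model of the SRQ resize copy: compact live entries into a new ring.
 */
#include <stdio.h>
#include <string.h>

static unsigned int ring_resize(const int *old, unsigned int old_size,
				unsigned int head, unsigned int tail,
				int *new_ring, unsigned int new_size)
{
	unsigned int n = 0;

	while (tail != head) {
		if (n >= new_size)
			return 0;	/* new ring too small; real code fails with -EINVAL */
		new_ring[n++] = old[tail];
		if (++tail >= old_size)
			tail = 0;
	}
	return n;		/* becomes the new head; new tail is 0 */
}

int main(void)
{
	int old[4] = { 10, 11, 12, 13 };
	int new_ring[8];
	unsigned int head;

	memset(new_ring, 0, sizeof(new_ring));
	/* two entries outstanding: slots 3 and 0 (wrapped) */
	head = ring_resize(old, 4, 1, 3, new_ring, 8);
	printf("new head %u, first entry %d\n", head, new_ring[0]); /* 2, 13 */
	return 0;
}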
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index 70351b7e35c0..30a825928fcf 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -271,33 +271,6 @@ void ipath_get_faststats(unsigned long opaque)
271 } 271 }
272 } 272 }
273 273
274 if (dd->ipath_nosma_bufs) {
275 dd->ipath_nosma_secs += 5;
276 if (dd->ipath_nosma_secs >= 30) {
277 ipath_cdbg(SMA, "No SMA bufs avail %u seconds; "
278 "cancelling pending sends\n",
279 dd->ipath_nosma_secs);
280 /*
281 * issue an abort as well, in case we have a packet
282 * stuck in launch fifo. This could corrupt an
283 * outgoing user packet in the worst case,
 284 * but this is pretty catastrophic anyway.
285 */
286 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
287 INFINIPATH_S_ABORT);
288 ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
289 dd->ipath_piobcnt2k +
290 dd->ipath_piobcnt4k -
291 dd->ipath_lastport_piobuf);
292 /* start again, if necessary */
293 dd->ipath_nosma_secs = 0;
294 } else
295 ipath_cdbg(SMA, "No SMA bufs avail %u tries, "
296 "after %u seconds\n",
297 dd->ipath_nosma_bufs,
298 dd->ipath_nosma_secs);
299 }
300
301done: 274done:
302 mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5); 275 mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
303} 276}
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index b98821d7801d..e299148c4b68 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -35,7 +35,6 @@
35#include <linux/pci.h> 35#include <linux/pci.h>
36 36
37#include "ipath_kernel.h" 37#include "ipath_kernel.h"
38#include "ipath_layer.h"
39#include "ipath_common.h" 38#include "ipath_common.h"
40 39
41/** 40/**
@@ -76,7 +75,7 @@ bail:
76static ssize_t show_version(struct device_driver *dev, char *buf) 75static ssize_t show_version(struct device_driver *dev, char *buf)
77{ 76{
78 /* The string printed here is already newline-terminated. */ 77 /* The string printed here is already newline-terminated. */
79 return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version); 78 return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
80} 79}
81 80
82static ssize_t show_num_units(struct device_driver *dev, char *buf) 81static ssize_t show_num_units(struct device_driver *dev, char *buf)
@@ -108,8 +107,8 @@ static const char *ipath_status_str[] = {
108 "Initted", 107 "Initted",
109 "Disabled", 108 "Disabled",
110 "Admin_Disabled", 109 "Admin_Disabled",
111 "OIB_SMA", 110 "", /* This used to be the old "OIB_SMA" status. */
112 "SMA", 111 "", /* This used to be the old "SMA" status. */
113 "Present", 112 "Present",
114 "IB_link_up", 113 "IB_link_up",
115 "IB_configured", 114 "IB_configured",
@@ -227,7 +226,6 @@ static ssize_t store_mlid(struct device *dev,
227 unit = dd->ipath_unit; 226 unit = dd->ipath_unit;
228 227
229 dd->ipath_mlid = mlid; 228 dd->ipath_mlid = mlid;
230 ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST);
231 229
232 goto bail; 230 goto bail;
233invalid: 231invalid:
@@ -467,7 +465,7 @@ static ssize_t store_link_state(struct device *dev,
467 if (ret < 0) 465 if (ret < 0)
468 goto invalid; 466 goto invalid;
469 467
470 r = ipath_layer_set_linkstate(dd, state); 468 r = ipath_set_linkstate(dd, state);
471 if (r < 0) { 469 if (r < 0) {
472 ret = r; 470 ret = r;
473 goto bail; 471 goto bail;
@@ -502,7 +500,7 @@ static ssize_t store_mtu(struct device *dev,
502 if (ret < 0) 500 if (ret < 0)
503 goto invalid; 501 goto invalid;
504 502
505 r = ipath_layer_set_mtu(dd, mtu); 503 r = ipath_set_mtu(dd, mtu);
506 if (r < 0) 504 if (r < 0)
507 ret = r; 505 ret = r;
508 506
@@ -563,6 +561,33 @@ bail:
563 return ret; 561 return ret;
564} 562}
565 563
564static ssize_t store_rx_pol_inv(struct device *dev,
565 struct device_attribute *attr,
566 const char *buf,
567 size_t count)
568{
569 struct ipath_devdata *dd = dev_get_drvdata(dev);
570 int ret, r;
571 u16 val;
572
573 ret = ipath_parse_ushort(buf, &val);
574 if (ret < 0)
575 goto invalid;
576
577 r = ipath_set_rx_pol_inv(dd, val);
578 if (r < 0) {
579 ret = r;
580 goto bail;
581 }
582
583 goto bail;
584invalid:
585 ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
586bail:
587 return ret;
588}
589
590
566static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL); 591static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
567static DRIVER_ATTR(version, S_IRUGO, show_version, NULL); 592static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
568 593
@@ -589,6 +614,7 @@ static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
589static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL); 614static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
590static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 615static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
591static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL); 616static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
617static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
592 618
593static struct attribute *dev_attributes[] = { 619static struct attribute *dev_attributes[] = {
594 &dev_attr_guid.attr, 620 &dev_attr_guid.attr,
@@ -603,6 +629,7 @@ static struct attribute *dev_attributes[] = {
603 &dev_attr_boardversion.attr, 629 &dev_attr_boardversion.attr,
604 &dev_attr_unit.attr, 630 &dev_attr_unit.attr,
605 &dev_attr_enabled.attr, 631 &dev_attr_enabled.attr,
632 &dev_attr_rx_pol_inv.attr,
606 NULL 633 NULL
607}; 634};
608 635
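The new rx_pol_inv entry above follows the usual shape of a write-only sysfs attribute: parse the user buffer, hand the value to a driver setter, and report the outcome. Here is a hedged sketch of that pattern; my_devdata, my_parse_ushort() and my_apply_setting() are hypothetical placeholders rather than ipath symbols, and the usual <linux/device.h>/<linux/types.h> includes are assumed.

/* my_devdata, my_parse_ushort() and my_apply_setting() are hypothetical. */
static ssize_t store_example(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct my_devdata *dd = dev_get_drvdata(dev);	/* per-device state */
	u16 val;
	int ret;

	ret = my_parse_ushort(buf, &val);	/* reject malformed input */
	if (ret < 0)
		return ret;

	ret = my_apply_setting(dd, val);	/* push the value to the hardware */
	if (ret < 0)
		return ret;

	return count;		/* tell sysfs the whole write was consumed */
}
static DEVICE_ATTR(example, S_IWUSR, NULL, store_example);

Returning count on success is the conventional way to signal that the write was accepted; returning a negative errno propagates the failure to the writer.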
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index c33abea2d5a7..0fd3cded16ba 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34#include "ipath_verbs.h" 34#include "ipath_verbs.h"
35#include "ipath_common.h" 35#include "ipath_kernel.h"
36 36
37/* cut down ridiculously long IB macro names */ 37/* cut down ridiculously long IB macro names */
38#define OP(x) IB_OPCODE_UC_##x 38#define OP(x) IB_OPCODE_UC_##x
@@ -261,8 +261,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
261 * size to 56 bytes so the last 4 bytes of 261 * size to 56 bytes so the last 4 bytes of
262 * the BTH header (PSN) is in the data buffer. 262 * the BTH header (PSN) is in the data buffer.
263 */ 263 */
264 header_in_data = 264 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
265 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
266 if (header_in_data) { 265 if (header_in_data) {
267 psn = be32_to_cpu(((__be32 *) data)[0]); 266 psn = be32_to_cpu(((__be32 *) data)[0]);
268 data += sizeof(__be32); 267 data += sizeof(__be32);
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 3466129af804..6991d1d74e3c 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -34,7 +34,54 @@
34#include <rdma/ib_smi.h> 34#include <rdma/ib_smi.h>
35 35
36#include "ipath_verbs.h" 36#include "ipath_verbs.h"
37#include "ipath_common.h" 37#include "ipath_kernel.h"
38
39static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
40 u32 *lengthp, struct ipath_sge_state *ss)
41{
42 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
43 int user = to_ipd(qp->ibqp.pd)->user;
44 int i, j, ret;
45 struct ib_wc wc;
46
47 *lengthp = 0;
48 for (i = j = 0; i < wqe->num_sge; i++) {
49 if (wqe->sg_list[i].length == 0)
50 continue;
51 /* Check LKEY */
52 if ((user && wqe->sg_list[i].lkey == 0) ||
53 !ipath_lkey_ok(&dev->lk_table,
54 j ? &ss->sg_list[j - 1] : &ss->sge,
55 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
56 goto bad_lkey;
57 *lengthp += wqe->sg_list[i].length;
58 j++;
59 }
60 ss->num_sge = j;
61 ret = 1;
62 goto bail;
63
64bad_lkey:
65 wc.wr_id = wqe->wr_id;
66 wc.status = IB_WC_LOC_PROT_ERR;
67 wc.opcode = IB_WC_RECV;
68 wc.vendor_err = 0;
69 wc.byte_len = 0;
70 wc.imm_data = 0;
71 wc.qp_num = qp->ibqp.qp_num;
72 wc.src_qp = 0;
73 wc.wc_flags = 0;
74 wc.pkey_index = 0;
75 wc.slid = 0;
76 wc.sl = 0;
77 wc.dlid_path_bits = 0;
78 wc.port_num = 0;
79 /* Signal solicited completion event. */
80 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
81 ret = 0;
82bail:
83 return ret;
84}
38 85
39/** 86/**
40 * ipath_ud_loopback - handle send on loopback QPs 87 * ipath_ud_loopback - handle send on loopback QPs
@@ -46,6 +93,8 @@
46 * 93 *
47 * This is called from ipath_post_ud_send() to forward a WQE addressed 94 * This is called from ipath_post_ud_send() to forward a WQE addressed
48 * to the same HCA. 95 * to the same HCA.
96 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
97 * while this is being called.
49 */ 98 */
50static void ipath_ud_loopback(struct ipath_qp *sqp, 99static void ipath_ud_loopback(struct ipath_qp *sqp,
51 struct ipath_sge_state *ss, 100 struct ipath_sge_state *ss,
@@ -60,7 +109,11 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
60 struct ipath_srq *srq; 109 struct ipath_srq *srq;
61 struct ipath_sge_state rsge; 110 struct ipath_sge_state rsge;
62 struct ipath_sge *sge; 111 struct ipath_sge *sge;
112 struct ipath_rwq *wq;
63 struct ipath_rwqe *wqe; 113 struct ipath_rwqe *wqe;
114 void (*handler)(struct ib_event *, void *);
115 u32 tail;
116 u32 rlen;
64 117
65 qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn); 118 qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
66 if (!qp) 119 if (!qp)
@@ -94,6 +147,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
94 wc->imm_data = 0; 147 wc->imm_data = 0;
95 } 148 }
96 149
150 if (wr->num_sge > 1) {
151 rsge.sg_list = kmalloc((wr->num_sge - 1) *
152 sizeof(struct ipath_sge),
153 GFP_ATOMIC);
154 } else
155 rsge.sg_list = NULL;
156
97 /* 157 /*
98 * Get the next work request entry to find where to put the data. 158 * Get the next work request entry to find where to put the data.
99 * Note that it is safe to drop the lock after changing rq->tail 159 * Note that it is safe to drop the lock after changing rq->tail
@@ -101,37 +161,52 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
101 */ 161 */
102 if (qp->ibqp.srq) { 162 if (qp->ibqp.srq) {
103 srq = to_isrq(qp->ibqp.srq); 163 srq = to_isrq(qp->ibqp.srq);
164 handler = srq->ibsrq.event_handler;
104 rq = &srq->rq; 165 rq = &srq->rq;
105 } else { 166 } else {
106 srq = NULL; 167 srq = NULL;
168 handler = NULL;
107 rq = &qp->r_rq; 169 rq = &qp->r_rq;
108 } 170 }
171
109 spin_lock_irqsave(&rq->lock, flags); 172 spin_lock_irqsave(&rq->lock, flags);
110 if (rq->tail == rq->head) { 173 wq = rq->wq;
111 spin_unlock_irqrestore(&rq->lock, flags); 174 tail = wq->tail;
112 dev->n_pkt_drops++; 175 while (1) {
113 goto done; 176 if (unlikely(tail == wq->head)) {
177 spin_unlock_irqrestore(&rq->lock, flags);
178 dev->n_pkt_drops++;
179 goto bail_sge;
180 }
181 wqe = get_rwqe_ptr(rq, tail);
182 if (++tail >= rq->size)
183 tail = 0;
184 if (init_sge(qp, wqe, &rlen, &rsge))
185 break;
186 wq->tail = tail;
114 } 187 }
115 /* Silently drop packets which are too big. */ 188 /* Silently drop packets which are too big. */
116 wqe = get_rwqe_ptr(rq, rq->tail); 189 if (wc->byte_len > rlen) {
117 if (wc->byte_len > wqe->length) {
118 spin_unlock_irqrestore(&rq->lock, flags); 190 spin_unlock_irqrestore(&rq->lock, flags);
119 dev->n_pkt_drops++; 191 dev->n_pkt_drops++;
120 goto done; 192 goto bail_sge;
121 } 193 }
194 wq->tail = tail;
122 wc->wr_id = wqe->wr_id; 195 wc->wr_id = wqe->wr_id;
123 rsge.sge = wqe->sg_list[0]; 196 if (handler) {
124 rsge.sg_list = wqe->sg_list + 1;
125 rsge.num_sge = wqe->num_sge;
126 if (++rq->tail >= rq->size)
127 rq->tail = 0;
128 if (srq && srq->ibsrq.event_handler) {
129 u32 n; 197 u32 n;
130 198
131 if (rq->head < rq->tail) 199 /*
132 n = rq->size + rq->head - rq->tail; 200 * validate head pointer value and compute
201 * the number of remaining WQEs.
202 */
203 n = wq->head;
204 if (n >= rq->size)
205 n = 0;
206 if (n < tail)
207 n += rq->size - tail;
133 else 208 else
134 n = rq->head - rq->tail; 209 n -= tail;
135 if (n < srq->limit) { 210 if (n < srq->limit) {
136 struct ib_event ev; 211 struct ib_event ev;
137 212
@@ -140,12 +215,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
140 ev.device = qp->ibqp.device; 215 ev.device = qp->ibqp.device;
141 ev.element.srq = qp->ibqp.srq; 216 ev.element.srq = qp->ibqp.srq;
142 ev.event = IB_EVENT_SRQ_LIMIT_REACHED; 217 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
143 srq->ibsrq.event_handler(&ev, 218 handler(&ev, srq->ibsrq.srq_context);
144 srq->ibsrq.srq_context);
145 } else 219 } else
146 spin_unlock_irqrestore(&rq->lock, flags); 220 spin_unlock_irqrestore(&rq->lock, flags);
147 } else 221 } else
148 spin_unlock_irqrestore(&rq->lock, flags); 222 spin_unlock_irqrestore(&rq->lock, flags);
223
149 ah_attr = &to_iah(wr->wr.ud.ah)->attr; 224 ah_attr = &to_iah(wr->wr.ud.ah)->attr;
150 if (ah_attr->ah_flags & IB_AH_GRH) { 225 if (ah_attr->ah_flags & IB_AH_GRH) {
151 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); 226 ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
@@ -186,7 +261,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
186 wc->src_qp = sqp->ibqp.qp_num; 261 wc->src_qp = sqp->ibqp.qp_num;
187 /* XXX do we know which pkey matched? Only needed for GSI. */ 262 /* XXX do we know which pkey matched? Only needed for GSI. */
188 wc->pkey_index = 0; 263 wc->pkey_index = 0;
189 wc->slid = ipath_layer_get_lid(dev->dd) | 264 wc->slid = dev->dd->ipath_lid |
190 (ah_attr->src_path_bits & 265 (ah_attr->src_path_bits &
191 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1)); 266 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
192 wc->sl = ah_attr->sl; 267 wc->sl = ah_attr->sl;
@@ -196,6 +271,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
196 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc, 271 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
197 wr->send_flags & IB_SEND_SOLICITED); 272 wr->send_flags & IB_SEND_SOLICITED);
198 273
274bail_sge:
275 kfree(rsge.sg_list);
199done: 276done:
200 if (atomic_dec_and_test(&qp->refcount)) 277 if (atomic_dec_and_test(&qp->refcount))
201 wake_up(&qp->wait); 278 wake_up(&qp->wait);
@@ -276,7 +353,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
276 ss.num_sge++; 353 ss.num_sge++;
277 } 354 }
278 /* Check for invalid packet size. */ 355 /* Check for invalid packet size. */
279 if (len > ipath_layer_get_ibmtu(dev->dd)) { 356 if (len > dev->dd->ipath_ibmtu) {
280 ret = -EINVAL; 357 ret = -EINVAL;
281 goto bail; 358 goto bail;
282 } 359 }
@@ -298,7 +375,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
298 dev->n_unicast_xmit++; 375 dev->n_unicast_xmit++;
299 lid = ah_attr->dlid & 376 lid = ah_attr->dlid &
300 ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); 377 ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
301 if (unlikely(lid == ipath_layer_get_lid(dev->dd))) { 378 if (unlikely(lid == dev->dd->ipath_lid)) {
302 /* 379 /*
303 * Pass in an uninitialized ib_wc to save stack 380 * Pass in an uninitialized ib_wc to save stack
304 * space. 381 * space.
@@ -327,7 +404,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
327 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = 404 qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
328 dev->gid_prefix; 405 dev->gid_prefix;
329 qp->s_hdr.u.l.grh.sgid.global.interface_id = 406 qp->s_hdr.u.l.grh.sgid.global.interface_id =
330 ipath_layer_get_guid(dev->dd); 407 dev->dd->ipath_guid;
331 qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid; 408 qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
332 /* 409 /*
333 * Don't worry about sending to locally attached multicast 410 * Don't worry about sending to locally attached multicast
@@ -357,7 +434,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
357 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); 434 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
358 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ 435 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
359 qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC); 436 qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
360 lid = ipath_layer_get_lid(dev->dd); 437 lid = dev->dd->ipath_lid;
361 if (lid) { 438 if (lid) {
362 lid |= ah_attr->src_path_bits & 439 lid |= ah_attr->src_path_bits &
363 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); 440 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
@@ -368,7 +445,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
368 bth0 |= 1 << 23; 445 bth0 |= 1 << 23;
369 bth0 |= extra_bytes << 20; 446 bth0 |= extra_bytes << 20;
370 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : 447 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
371 ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); 448 ipath_get_pkey(dev->dd, qp->s_pkey_index);
372 ohdr->bth[0] = cpu_to_be32(bth0); 449 ohdr->bth[0] = cpu_to_be32(bth0);
373 /* 450 /*
374 * Use the multicast QP if the destination LID is a multicast LID. 451 * Use the multicast QP if the destination LID is a multicast LID.
@@ -433,13 +510,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
433 int opcode; 510 int opcode;
434 u32 hdrsize; 511 u32 hdrsize;
435 u32 pad; 512 u32 pad;
436 unsigned long flags;
437 struct ib_wc wc; 513 struct ib_wc wc;
438 u32 qkey; 514 u32 qkey;
439 u32 src_qp; 515 u32 src_qp;
440 struct ipath_rq *rq;
441 struct ipath_srq *srq;
442 struct ipath_rwqe *wqe;
443 u16 dlid; 516 u16 dlid;
444 int header_in_data; 517 int header_in_data;
445 518
@@ -458,8 +531,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
458 * the eager header buffer size to 56 bytes so the last 12 531 * the eager header buffer size to 56 bytes so the last 12
459 * bytes of the IB header is in the data buffer. 532 * bytes of the IB header is in the data buffer.
460 */ 533 */
461 header_in_data = 534 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
462 ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
463 if (header_in_data) { 535 if (header_in_data) {
464 qkey = be32_to_cpu(((__be32 *) data)[1]); 536 qkey = be32_to_cpu(((__be32 *) data)[1]);
465 src_qp = be32_to_cpu(((__be32 *) data)[2]); 537 src_qp = be32_to_cpu(((__be32 *) data)[2]);
@@ -547,19 +619,10 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
547 619
548 /* 620 /*
549 * Get the next work request entry to find where to put the data. 621 * Get the next work request entry to find where to put the data.
550 * Note that it is safe to drop the lock after changing rq->tail
551 * since ipath_post_receive() won't fill the empty slot.
552 */ 622 */
553 if (qp->ibqp.srq) { 623 if (qp->r_reuse_sge)
554 srq = to_isrq(qp->ibqp.srq); 624 qp->r_reuse_sge = 0;
555 rq = &srq->rq; 625 else if (!ipath_get_rwqe(qp, 0)) {
556 } else {
557 srq = NULL;
558 rq = &qp->r_rq;
559 }
560 spin_lock_irqsave(&rq->lock, flags);
561 if (rq->tail == rq->head) {
562 spin_unlock_irqrestore(&rq->lock, flags);
563 /* 626 /*
564 * Count VL15 packets dropped due to no receive buffer. 627 * Count VL15 packets dropped due to no receive buffer.
565 * Otherwise, count them as buffer overruns since usually, 628 * Otherwise, count them as buffer overruns since usually,
@@ -573,39 +636,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
573 goto bail; 636 goto bail;
574 } 637 }
575 /* Silently drop packets which are too big. */ 638 /* Silently drop packets which are too big. */
576 wqe = get_rwqe_ptr(rq, rq->tail); 639 if (wc.byte_len > qp->r_len) {
577 if (wc.byte_len > wqe->length) { 640 qp->r_reuse_sge = 1;
578 spin_unlock_irqrestore(&rq->lock, flags);
579 dev->n_pkt_drops++; 641 dev->n_pkt_drops++;
580 goto bail; 642 goto bail;
581 } 643 }
582 wc.wr_id = wqe->wr_id;
583 qp->r_sge.sge = wqe->sg_list[0];
584 qp->r_sge.sg_list = wqe->sg_list + 1;
585 qp->r_sge.num_sge = wqe->num_sge;
586 if (++rq->tail >= rq->size)
587 rq->tail = 0;
588 if (srq && srq->ibsrq.event_handler) {
589 u32 n;
590
591 if (rq->head < rq->tail)
592 n = rq->size + rq->head - rq->tail;
593 else
594 n = rq->head - rq->tail;
595 if (n < srq->limit) {
596 struct ib_event ev;
597
598 srq->limit = 0;
599 spin_unlock_irqrestore(&rq->lock, flags);
600 ev.device = qp->ibqp.device;
601 ev.element.srq = qp->ibqp.srq;
602 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
603 srq->ibsrq.event_handler(&ev,
604 srq->ibsrq.srq_context);
605 } else
606 spin_unlock_irqrestore(&rq->lock, flags);
607 } else
608 spin_unlock_irqrestore(&rq->lock, flags);
609 if (has_grh) { 644 if (has_grh) {
610 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, 645 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
611 sizeof(struct ib_grh)); 646 sizeof(struct ib_grh));
@@ -614,6 +649,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
614 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); 649 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
615 ipath_copy_sge(&qp->r_sge, data, 650 ipath_copy_sge(&qp->r_sge, data,
616 wc.byte_len - sizeof(struct ib_grh)); 651 wc.byte_len - sizeof(struct ib_grh));
652 wc.wr_id = qp->r_wr_id;
617 wc.status = IB_WC_SUCCESS; 653 wc.status = IB_WC_SUCCESS;
618 wc.opcode = IB_WC_RECV; 654 wc.opcode = IB_WC_RECV;
619 wc.vendor_err = 0; 655 wc.vendor_err = 0;
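Several hunks in ipath_ud.c above consume a receive WQE and then recompute how many WQEs remain posted, clamping the head value because it may now be written by userspace through an mmap'ed page, and raising IB_EVENT_SRQ_LIMIT_REACHED when the count drops below the SRQ limit. A compact userspace sketch of that wrap-around arithmetic (illustrative only):

#include <stdint.h>

/*
 * Entries still posted in a circular queue of 'size' slots, where
 * 'head' is the producer index (clamped, since it may come from a
 * shared page) and 'tail' is the consumer index.
 */
uint32_t rq_entries_posted(uint32_t head, uint32_t tail, uint32_t size)
{
	if (head >= size)	/* distrust a head written by userspace */
		head = 0;
	if (head < tail)
		return head + size - tail;
	return head - tail;
}

When this count falls below srq->limit, the driver disarms the limit and calls the SRQ's event handler, as in the loopback hunk above.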
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index d70a9b6b5239..b8381c5e72bd 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -33,15 +33,13 @@
33 33
34#include <rdma/ib_mad.h> 34#include <rdma/ib_mad.h>
35#include <rdma/ib_user_verbs.h> 35#include <rdma/ib_user_verbs.h>
36#include <linux/io.h>
36#include <linux/utsname.h> 37#include <linux/utsname.h>
37 38
38#include "ipath_kernel.h" 39#include "ipath_kernel.h"
39#include "ipath_verbs.h" 40#include "ipath_verbs.h"
40#include "ipath_common.h" 41#include "ipath_common.h"
41 42
42/* Not static, because we don't want the compiler removing it */
43const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
44
45static unsigned int ib_ipath_qp_table_size = 251; 43static unsigned int ib_ipath_qp_table_size = 251;
46module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); 44module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
47MODULE_PARM_DESC(qp_table_size, "QP table size"); 45MODULE_PARM_DESC(qp_table_size, "QP table size");
@@ -52,10 +50,6 @@ module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
52MODULE_PARM_DESC(lkey_table_size, 50MODULE_PARM_DESC(lkey_table_size,
53 "LKEY table size in bits (2^n, 1 <= n <= 23)"); 51 "LKEY table size in bits (2^n, 1 <= n <= 23)");
54 52
55unsigned int ib_ipath_debug; /* debug mask */
56module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
57MODULE_PARM_DESC(debug, "Verbs debug mask");
58
59static unsigned int ib_ipath_max_pds = 0xFFFF; 53static unsigned int ib_ipath_max_pds = 0xFFFF;
60module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); 54module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
61MODULE_PARM_DESC(max_pds, 55MODULE_PARM_DESC(max_pds,
@@ -79,6 +73,10 @@ module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
79 S_IWUSR | S_IRUGO); 73 S_IWUSR | S_IRUGO);
80MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); 74MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
81 75
76unsigned int ib_ipath_max_qps = 16384;
77module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
78MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
79
82unsigned int ib_ipath_max_sges = 0x60; 80unsigned int ib_ipath_max_sges = 0x60;
83module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); 81module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
84MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); 82MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
@@ -109,9 +107,9 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
109 uint, S_IWUSR | S_IRUGO); 107 uint, S_IWUSR | S_IRUGO);
110MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); 108MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
111 109
112MODULE_LICENSE("GPL"); 110static unsigned int ib_ipath_disable_sma;
113MODULE_AUTHOR("QLogic <support@pathscale.com>"); 111module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
114MODULE_DESCRIPTION("QLogic InfiniPath driver"); 112MODULE_PARM_DESC(disable_sma, "Disable the SMA");
115 113
116const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { 114const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
117 [IB_QPS_RESET] = 0, 115 [IB_QPS_RESET] = 0,
@@ -125,6 +123,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
125 [IB_QPS_ERR] = 0, 123 [IB_QPS_ERR] = 0,
126}; 124};
127 125
126struct ipath_ucontext {
127 struct ib_ucontext ibucontext;
128};
129
130static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
131 *ibucontext)
132{
133 return container_of(ibucontext, struct ipath_ucontext, ibucontext);
134}
135
128/* 136/*
129 * Translate ib_wr_opcode into ib_wc_opcode. 137 * Translate ib_wr_opcode into ib_wc_opcode.
130 */ 138 */
@@ -277,11 +285,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
277 struct ib_recv_wr **bad_wr) 285 struct ib_recv_wr **bad_wr)
278{ 286{
279 struct ipath_qp *qp = to_iqp(ibqp); 287 struct ipath_qp *qp = to_iqp(ibqp);
288 struct ipath_rwq *wq = qp->r_rq.wq;
280 unsigned long flags; 289 unsigned long flags;
281 int ret; 290 int ret;
282 291
283 /* Check that state is OK to post receive. */ 292 /* Check that state is OK to post receive. */
284 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) { 293 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
285 *bad_wr = wr; 294 *bad_wr = wr;
286 ret = -EINVAL; 295 ret = -EINVAL;
287 goto bail; 296 goto bail;
@@ -290,59 +299,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
290 for (; wr; wr = wr->next) { 299 for (; wr; wr = wr->next) {
291 struct ipath_rwqe *wqe; 300 struct ipath_rwqe *wqe;
292 u32 next; 301 u32 next;
293 int i, j; 302 int i;
294 303
295 if (wr->num_sge > qp->r_rq.max_sge) { 304 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
296 *bad_wr = wr; 305 *bad_wr = wr;
297 ret = -ENOMEM; 306 ret = -ENOMEM;
298 goto bail; 307 goto bail;
299 } 308 }
300 309
301 spin_lock_irqsave(&qp->r_rq.lock, flags); 310 spin_lock_irqsave(&qp->r_rq.lock, flags);
302 next = qp->r_rq.head + 1; 311 next = wq->head + 1;
303 if (next >= qp->r_rq.size) 312 if (next >= qp->r_rq.size)
304 next = 0; 313 next = 0;
305 if (next == qp->r_rq.tail) { 314 if (next == wq->tail) {
306 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 315 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
307 *bad_wr = wr; 316 *bad_wr = wr;
308 ret = -ENOMEM; 317 ret = -ENOMEM;
309 goto bail; 318 goto bail;
310 } 319 }
311 320
312 wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head); 321 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
313 wqe->wr_id = wr->wr_id; 322 wqe->wr_id = wr->wr_id;
314 wqe->sg_list[0].mr = NULL; 323 wqe->num_sge = wr->num_sge;
315 wqe->sg_list[0].vaddr = NULL; 324 for (i = 0; i < wr->num_sge; i++)
316 wqe->sg_list[0].length = 0; 325 wqe->sg_list[i] = wr->sg_list[i];
317 wqe->sg_list[0].sge_length = 0; 326 wq->head = next;
318 wqe->length = 0;
319 for (i = 0, j = 0; i < wr->num_sge; i++) {
320 /* Check LKEY */
321 if (to_ipd(qp->ibqp.pd)->user &&
322 wr->sg_list[i].lkey == 0) {
323 spin_unlock_irqrestore(&qp->r_rq.lock,
324 flags);
325 *bad_wr = wr;
326 ret = -EINVAL;
327 goto bail;
328 }
329 if (wr->sg_list[i].length == 0)
330 continue;
331 if (!ipath_lkey_ok(
332 &to_idev(qp->ibqp.device)->lk_table,
333 &wqe->sg_list[j], &wr->sg_list[i],
334 IB_ACCESS_LOCAL_WRITE)) {
335 spin_unlock_irqrestore(&qp->r_rq.lock,
336 flags);
337 *bad_wr = wr;
338 ret = -EINVAL;
339 goto bail;
340 }
341 wqe->length += wr->sg_list[i].length;
342 j++;
343 }
344 wqe->num_sge = j;
345 qp->r_rq.head = next;
346 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 327 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
347 } 328 }
348 ret = 0; 329 ret = 0;
@@ -377,6 +358,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
377 switch (qp->ibqp.qp_type) { 358 switch (qp->ibqp.qp_type) {
378 case IB_QPT_SMI: 359 case IB_QPT_SMI:
379 case IB_QPT_GSI: 360 case IB_QPT_GSI:
361 if (ib_ipath_disable_sma)
362 break;
363 /* FALLTHROUGH */
380 case IB_QPT_UD: 364 case IB_QPT_UD:
381 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); 365 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
382 break; 366 break;
@@ -395,7 +379,7 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
395} 379}
396 380
397/** 381/**
398 * ipath_ib_rcv - process and incoming packet 382 * ipath_ib_rcv - process an incoming packet
399 * @arg: the device pointer 383 * @arg: the device pointer
400 * @rhdr: the header of the packet 384 * @rhdr: the header of the packet
401 * @data: the packet data 385 * @data: the packet data
@@ -404,9 +388,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev,
404 * This is called from ipath_kreceive() to process an incoming packet at 388 * This is called from ipath_kreceive() to process an incoming packet at
405 * interrupt level. Tlen is the length of the header + data + CRC in bytes. 389 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
406 */ 390 */
407static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen) 391void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
392 u32 tlen)
408{ 393{
409 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
410 struct ipath_ib_header *hdr = rhdr; 394 struct ipath_ib_header *hdr = rhdr;
411 struct ipath_other_headers *ohdr; 395 struct ipath_other_headers *ohdr;
412 struct ipath_qp *qp; 396 struct ipath_qp *qp;
@@ -427,7 +411,7 @@ static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen)
427 lid = be16_to_cpu(hdr->lrh[1]); 411 lid = be16_to_cpu(hdr->lrh[1]);
428 if (lid < IPATH_MULTICAST_LID_BASE) { 412 if (lid < IPATH_MULTICAST_LID_BASE) {
429 lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); 413 lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
430 if (unlikely(lid != ipath_layer_get_lid(dev->dd))) { 414 if (unlikely(lid != dev->dd->ipath_lid)) {
431 dev->rcv_errors++; 415 dev->rcv_errors++;
432 goto bail; 416 goto bail;
433 } 417 }
@@ -495,9 +479,8 @@ bail:;
495 * This is called from ipath_do_rcv_timer() at interrupt level to check for 479 * This is called from ipath_do_rcv_timer() at interrupt level to check for
496 * QPs which need retransmits and to collect performance numbers. 480 * QPs which need retransmits and to collect performance numbers.
497 */ 481 */
498static void ipath_ib_timer(void *arg) 482void ipath_ib_timer(struct ipath_ibdev *dev)
499{ 483{
500 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
501 struct ipath_qp *resend = NULL; 484 struct ipath_qp *resend = NULL;
502 struct list_head *last; 485 struct list_head *last;
503 struct ipath_qp *qp; 486 struct ipath_qp *qp;
@@ -539,19 +522,19 @@ static void ipath_ib_timer(void *arg)
539 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED && 522 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
540 --dev->pma_sample_start == 0) { 523 --dev->pma_sample_start == 0) {
541 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; 524 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
542 ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword, 525 ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
543 &dev->ipath_rword, 526 &dev->ipath_rword,
544 &dev->ipath_spkts, 527 &dev->ipath_spkts,
545 &dev->ipath_rpkts, 528 &dev->ipath_rpkts,
546 &dev->ipath_xmit_wait); 529 &dev->ipath_xmit_wait);
547 } 530 }
548 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { 531 if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
549 if (dev->pma_sample_interval == 0) { 532 if (dev->pma_sample_interval == 0) {
550 u64 ta, tb, tc, td, te; 533 u64 ta, tb, tc, td, te;
551 534
552 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; 535 dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
553 ipath_layer_snapshot_counters(dev->dd, &ta, &tb, 536 ipath_snapshot_counters(dev->dd, &ta, &tb,
554 &tc, &td, &te); 537 &tc, &td, &te);
555 538
556 dev->ipath_sword = ta - dev->ipath_sword; 539 dev->ipath_sword = ta - dev->ipath_sword;
557 dev->ipath_rword = tb - dev->ipath_rword; 540 dev->ipath_rword = tb - dev->ipath_rword;
@@ -581,6 +564,362 @@ static void ipath_ib_timer(void *arg)
581 } 564 }
582} 565}
583 566
567static void update_sge(struct ipath_sge_state *ss, u32 length)
568{
569 struct ipath_sge *sge = &ss->sge;
570
571 sge->vaddr += length;
572 sge->length -= length;
573 sge->sge_length -= length;
574 if (sge->sge_length == 0) {
575 if (--ss->num_sge)
576 *sge = *ss->sg_list++;
577 } else if (sge->length == 0 && sge->mr != NULL) {
578 if (++sge->n >= IPATH_SEGSZ) {
579 if (++sge->m >= sge->mr->mapsz)
580 return;
581 sge->n = 0;
582 }
583 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
584 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
585 }
586}
587
588#ifdef __LITTLE_ENDIAN
589static inline u32 get_upper_bits(u32 data, u32 shift)
590{
591 return data >> shift;
592}
593
594static inline u32 set_upper_bits(u32 data, u32 shift)
595{
596 return data << shift;
597}
598
599static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
600{
601 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
602 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
603 return data;
604}
605#else
606static inline u32 get_upper_bits(u32 data, u32 shift)
607{
608 return data << shift;
609}
610
611static inline u32 set_upper_bits(u32 data, u32 shift)
612{
613 return data >> shift;
614}
615
616static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
617{
618 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
619 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
620 return data;
621}
622#endif
623
624static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
625 u32 length)
626{
627 u32 extra = 0;
628 u32 data = 0;
629 u32 last;
630
631 while (1) {
632 u32 len = ss->sge.length;
633 u32 off;
634
635 BUG_ON(len == 0);
636 if (len > length)
637 len = length;
638 if (len > ss->sge.sge_length)
639 len = ss->sge.sge_length;
640 /* If the source address is not aligned, try to align it. */
641 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
642 if (off) {
643 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
644 ~(sizeof(u32) - 1));
645 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
646 u32 y;
647
648 y = sizeof(u32) - off;
649 if (len > y)
650 len = y;
651 if (len + extra >= sizeof(u32)) {
652 data |= set_upper_bits(v, extra *
653 BITS_PER_BYTE);
654 len = sizeof(u32) - extra;
655 if (len == length) {
656 last = data;
657 break;
658 }
659 __raw_writel(data, piobuf);
660 piobuf++;
661 extra = 0;
662 data = 0;
663 } else {
664 /* Clear unused upper bytes */
665 data |= clear_upper_bytes(v, len, extra);
666 if (len == length) {
667 last = data;
668 break;
669 }
670 extra += len;
671 }
672 } else if (extra) {
673 /* Source address is aligned. */
674 u32 *addr = (u32 *) ss->sge.vaddr;
675 int shift = extra * BITS_PER_BYTE;
676 int ushift = 32 - shift;
677 u32 l = len;
678
679 while (l >= sizeof(u32)) {
680 u32 v = *addr;
681
682 data |= set_upper_bits(v, shift);
683 __raw_writel(data, piobuf);
684 data = get_upper_bits(v, ushift);
685 piobuf++;
686 addr++;
687 l -= sizeof(u32);
688 }
689 /*
690 * We still have 'extra' number of bytes leftover.
691 */
692 if (l) {
693 u32 v = *addr;
694
695 if (l + extra >= sizeof(u32)) {
696 data |= set_upper_bits(v, shift);
697 len -= l + extra - sizeof(u32);
698 if (len == length) {
699 last = data;
700 break;
701 }
702 __raw_writel(data, piobuf);
703 piobuf++;
704 extra = 0;
705 data = 0;
706 } else {
707 /* Clear unused upper bytes */
708 data |= clear_upper_bytes(v, l,
709 extra);
710 if (len == length) {
711 last = data;
712 break;
713 }
714 extra += l;
715 }
716 } else if (len == length) {
717 last = data;
718 break;
719 }
720 } else if (len == length) {
721 u32 w;
722
723 /*
724 * Need to round up for the last dword in the
725 * packet.
726 */
727 w = (len + 3) >> 2;
728 __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
729 piobuf += w - 1;
730 last = ((u32 *) ss->sge.vaddr)[w - 1];
731 break;
732 } else {
733 u32 w = len >> 2;
734
735 __iowrite32_copy(piobuf, ss->sge.vaddr, w);
736 piobuf += w;
737
738 extra = len & (sizeof(u32) - 1);
739 if (extra) {
740 u32 v = ((u32 *) ss->sge.vaddr)[w];
741
742 /* Clear unused upper bytes */
743 data = clear_upper_bytes(v, extra, 0);
744 }
745 }
746 update_sge(ss, len);
747 length -= len;
748 }
749 /* Update address before sending packet. */
750 update_sge(ss, length);
751 /* must flush early everything before trigger word */
752 ipath_flush_wc();
753 __raw_writel(last, piobuf);
754 /* be sure trigger word is written */
755 ipath_flush_wc();
756}
757
758/**
759 * ipath_verbs_send - send a packet
760 * @dd: the infinipath device
761 * @hdrwords: the number of words in the header
762 * @hdr: the packet header
763 * @len: the length of the packet in bytes
764 * @ss: the SGE to send
765 */
766int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
767 u32 *hdr, u32 len, struct ipath_sge_state *ss)
768{
769 u32 __iomem *piobuf;
770 u32 plen;
771 int ret;
772
773 /* +1 is for the qword padding of pbc */
774 plen = hdrwords + ((len + 3) >> 2) + 1;
775 if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
776 ipath_dbg("packet len 0x%x too long, failing\n", plen);
777 ret = -EINVAL;
778 goto bail;
779 }
780
781 /* Get a PIO buffer to use. */
782 piobuf = ipath_getpiobuf(dd, NULL);
783 if (unlikely(piobuf == NULL)) {
784 ret = -EBUSY;
785 goto bail;
786 }
787
788 /*
789 * Write len to control qword, no flags.
790 * We have to flush after the PBC for correctness on some cpus
791 * or WC buffer can be written out of order.
792 */
793 writeq(plen, piobuf);
794 ipath_flush_wc();
795 piobuf += 2;
796 if (len == 0) {
797 /*
798 * If there is just the header portion, must flush before
799 * writing last word of header for correctness, and after
800 * the last header word (trigger word).
801 */
802 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
803 ipath_flush_wc();
804 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
805 ipath_flush_wc();
806 ret = 0;
807 goto bail;
808 }
809
810 __iowrite32_copy(piobuf, hdr, hdrwords);
811 piobuf += hdrwords;
812
813 /* The common case is aligned and contained in one segment. */
814 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
815 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
816 u32 w;
817 u32 *addr = (u32 *) ss->sge.vaddr;
818
819 /* Update address before sending packet. */
820 update_sge(ss, len);
821 /* Need to round up for the last dword in the packet. */
822 w = (len + 3) >> 2;
823 __iowrite32_copy(piobuf, addr, w - 1);
824 /* must flush early everything before trigger word */
825 ipath_flush_wc();
826 __raw_writel(addr[w - 1], piobuf + w - 1);
827 /* be sure trigger word is written */
828 ipath_flush_wc();
829 ret = 0;
830 goto bail;
831 }
832 copy_io(piobuf, ss, len);
833 ret = 0;
834
835bail:
836 return ret;
837}
838
839int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
840 u64 *rwords, u64 *spkts, u64 *rpkts,
841 u64 *xmit_wait)
842{
843 int ret;
844
845 if (!(dd->ipath_flags & IPATH_INITTED)) {
846 /* no hardware, freeze, etc. */
847 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
848 ret = -EINVAL;
849 goto bail;
850 }
851 *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
852 *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
853 *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
854 *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
855 *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
856
857 ret = 0;
858
859bail:
860 return ret;
861}
862
863/**
864 * ipath_get_counters - get various chip counters
865 * @dd: the infinipath device
866 * @cntrs: counters are placed here
867 *
868 * Return the counters needed by recv_pma_get_portcounters().
869 */
870int ipath_get_counters(struct ipath_devdata *dd,
871 struct ipath_verbs_counters *cntrs)
872{
873 int ret;
874
875 if (!(dd->ipath_flags & IPATH_INITTED)) {
876 /* no hardware, freeze, etc. */
877 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
878 ret = -EINVAL;
879 goto bail;
880 }
881 cntrs->symbol_error_counter =
882 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
883 cntrs->link_error_recovery_counter =
884 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
885 /*
886 * The link downed counter counts when the other side downs the
887 * connection. We add in the number of times we downed the link
888 * due to local link integrity errors to compensate.
889 */
890 cntrs->link_downed_counter =
891 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
892 cntrs->port_rcv_errors =
893 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
894 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
895 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
896 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
897 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
898 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
899 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
900 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
901 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
902 cntrs->port_rcv_remphys_errors =
903 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
904 cntrs->port_xmit_discards =
905 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
906 cntrs->port_xmit_data =
907 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
908 cntrs->port_rcv_data =
909 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
910 cntrs->port_xmit_packets =
911 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
912 cntrs->port_rcv_packets =
913 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
914 cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
915 cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
916
917 ret = 0;
918
919bail:
920 return ret;
921}
922
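ipath_get_counters() above gathers raw, monotonically increasing chip counters. Because the hardware cannot reset them, the verbs layer snapshots them into z_* baseline fields at registration time (see the "Snapshot current HW counters to 'clear' them" hunk further down) and reports only the difference. A small userspace illustration of that idiom, with made-up names:

#include <stdint.h>

struct errcnt_view {
	uint64_t z_symbol_errors;	/* baseline captured at registration */
};

/* Record the current raw value so later reads start from zero. */
void errcnt_snapshot(struct errcnt_view *v, uint64_t raw)
{
	v->z_symbol_errors = raw;
}

/* Report only what accumulated since the snapshot. */
uint64_t errcnt_read(const struct errcnt_view *v, uint64_t raw)
{
	return raw - v->z_symbol_errors;
}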
584/** 923/**
585 * ipath_ib_piobufavail - callback when a PIO buffer is available 924 * ipath_ib_piobufavail - callback when a PIO buffer is available
586 * @arg: the device pointer 925 * @arg: the device pointer
@@ -591,9 +930,8 @@ static void ipath_ib_timer(void *arg)
591 * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and 930 * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
592 * return zero). 931 * return zero).
593 */ 932 */
594static int ipath_ib_piobufavail(void *arg) 933int ipath_ib_piobufavail(struct ipath_ibdev *dev)
595{ 934{
596 struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
597 struct ipath_qp *qp; 935 struct ipath_qp *qp;
598 unsigned long flags; 936 unsigned long flags;
599 937
@@ -624,14 +962,14 @@ static int ipath_query_device(struct ib_device *ibdev,
624 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 962 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
625 IB_DEVICE_SYS_IMAGE_GUID; 963 IB_DEVICE_SYS_IMAGE_GUID;
626 props->page_size_cap = PAGE_SIZE; 964 props->page_size_cap = PAGE_SIZE;
627 props->vendor_id = ipath_layer_get_vendorid(dev->dd); 965 props->vendor_id = dev->dd->ipath_vendorid;
628 props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); 966 props->vendor_part_id = dev->dd->ipath_deviceid;
629 props->hw_ver = ipath_layer_get_pcirev(dev->dd); 967 props->hw_ver = dev->dd->ipath_pcirev;
630 968
631 props->sys_image_guid = dev->sys_image_guid; 969 props->sys_image_guid = dev->sys_image_guid;
632 970
633 props->max_mr_size = ~0ull; 971 props->max_mr_size = ~0ull;
634 props->max_qp = dev->qp_table.max; 972 props->max_qp = ib_ipath_max_qps;
635 props->max_qp_wr = ib_ipath_max_qp_wrs; 973 props->max_qp_wr = ib_ipath_max_qp_wrs;
636 props->max_sge = ib_ipath_max_sges; 974 props->max_sge = ib_ipath_max_sges;
637 props->max_cq = ib_ipath_max_cqs; 975 props->max_cq = ib_ipath_max_cqs;
@@ -647,7 +985,7 @@ static int ipath_query_device(struct ib_device *ibdev,
647 props->max_srq_sge = ib_ipath_max_srq_sges; 985 props->max_srq_sge = ib_ipath_max_srq_sges;
648 /* props->local_ca_ack_delay */ 986 /* props->local_ca_ack_delay */
649 props->atomic_cap = IB_ATOMIC_HCA; 987 props->atomic_cap = IB_ATOMIC_HCA;
650 props->max_pkeys = ipath_layer_get_npkeys(dev->dd); 988 props->max_pkeys = ipath_get_npkeys(dev->dd);
651 props->max_mcast_grp = ib_ipath_max_mcast_grps; 989 props->max_mcast_grp = ib_ipath_max_mcast_grps;
652 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; 990 props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
653 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 991 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@@ -672,12 +1010,17 @@ const u8 ipath_cvt_physportstate[16] = {
672 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6, 1010 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6,
673}; 1011};
674 1012
1013u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
1014{
1015 return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
1016}
1017
675static int ipath_query_port(struct ib_device *ibdev, 1018static int ipath_query_port(struct ib_device *ibdev,
676 u8 port, struct ib_port_attr *props) 1019 u8 port, struct ib_port_attr *props)
677{ 1020{
678 struct ipath_ibdev *dev = to_idev(ibdev); 1021 struct ipath_ibdev *dev = to_idev(ibdev);
679 enum ib_mtu mtu; 1022 enum ib_mtu mtu;
680 u16 lid = ipath_layer_get_lid(dev->dd); 1023 u16 lid = dev->dd->ipath_lid;
681 u64 ibcstat; 1024 u64 ibcstat;
682 1025
683 memset(props, 0, sizeof(*props)); 1026 memset(props, 0, sizeof(*props));
@@ -685,16 +1028,16 @@ static int ipath_query_port(struct ib_device *ibdev,
685 props->lmc = dev->mkeyprot_resv_lmc & 7; 1028 props->lmc = dev->mkeyprot_resv_lmc & 7;
686 props->sm_lid = dev->sm_lid; 1029 props->sm_lid = dev->sm_lid;
687 props->sm_sl = dev->sm_sl; 1030 props->sm_sl = dev->sm_sl;
688 ibcstat = ipath_layer_get_lastibcstat(dev->dd); 1031 ibcstat = dev->dd->ipath_lastibcstat;
689 props->state = ((ibcstat >> 4) & 0x3) + 1; 1032 props->state = ((ibcstat >> 4) & 0x3) + 1;
690 /* See phys_state_show() */ 1033 /* See phys_state_show() */
691 props->phys_state = ipath_cvt_physportstate[ 1034 props->phys_state = ipath_cvt_physportstate[
692 ipath_layer_get_lastibcstat(dev->dd) & 0xf]; 1035 dev->dd->ipath_lastibcstat & 0xf];
693 props->port_cap_flags = dev->port_cap_flags; 1036 props->port_cap_flags = dev->port_cap_flags;
694 props->gid_tbl_len = 1; 1037 props->gid_tbl_len = 1;
695 props->max_msg_sz = 0x80000000; 1038 props->max_msg_sz = 0x80000000;
696 props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd); 1039 props->pkey_tbl_len = ipath_get_npkeys(dev->dd);
697 props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) - 1040 props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) -
698 dev->z_pkey_violations; 1041 dev->z_pkey_violations;
699 props->qkey_viol_cntr = dev->qkey_violations; 1042 props->qkey_viol_cntr = dev->qkey_violations;
700 props->active_width = IB_WIDTH_4X; 1043 props->active_width = IB_WIDTH_4X;
@@ -704,7 +1047,7 @@ static int ipath_query_port(struct ib_device *ibdev,
704 props->init_type_reply = 0; 1047 props->init_type_reply = 0;
705 1048
706 props->max_mtu = IB_MTU_4096; 1049 props->max_mtu = IB_MTU_4096;
707 switch (ipath_layer_get_ibmtu(dev->dd)) { 1050 switch (dev->dd->ipath_ibmtu) {
708 case 4096: 1051 case 4096:
709 mtu = IB_MTU_4096; 1052 mtu = IB_MTU_4096;
710 break; 1053 break;
@@ -763,7 +1106,7 @@ static int ipath_modify_port(struct ib_device *ibdev,
763 dev->port_cap_flags |= props->set_port_cap_mask; 1106 dev->port_cap_flags |= props->set_port_cap_mask;
764 dev->port_cap_flags &= ~props->clr_port_cap_mask; 1107 dev->port_cap_flags &= ~props->clr_port_cap_mask;
765 if (port_modify_mask & IB_PORT_SHUTDOWN) 1108 if (port_modify_mask & IB_PORT_SHUTDOWN)
766 ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); 1109 ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
767 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) 1110 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
768 dev->qkey_violations = 0; 1111 dev->qkey_violations = 0;
769 return 0; 1112 return 0;
@@ -780,7 +1123,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port,
780 goto bail; 1123 goto bail;
781 } 1124 }
782 gid->global.subnet_prefix = dev->gid_prefix; 1125 gid->global.subnet_prefix = dev->gid_prefix;
783 gid->global.interface_id = ipath_layer_get_guid(dev->dd); 1126 gid->global.interface_id = dev->dd->ipath_guid;
784 1127
785 ret = 0; 1128 ret = 0;
786 1129
@@ -803,18 +1146,22 @@ static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
803 * we allow allocations of more than we report for this value. 1146 * we allow allocations of more than we report for this value.
804 */ 1147 */
805 1148
806 if (dev->n_pds_allocated == ib_ipath_max_pds) { 1149 pd = kmalloc(sizeof *pd, GFP_KERNEL);
1150 if (!pd) {
807 ret = ERR_PTR(-ENOMEM); 1151 ret = ERR_PTR(-ENOMEM);
808 goto bail; 1152 goto bail;
809 } 1153 }
810 1154
811 pd = kmalloc(sizeof *pd, GFP_KERNEL); 1155 spin_lock(&dev->n_pds_lock);
812 if (!pd) { 1156 if (dev->n_pds_allocated == ib_ipath_max_pds) {
1157 spin_unlock(&dev->n_pds_lock);
1158 kfree(pd);
813 ret = ERR_PTR(-ENOMEM); 1159 ret = ERR_PTR(-ENOMEM);
814 goto bail; 1160 goto bail;
815 } 1161 }
816 1162
817 dev->n_pds_allocated++; 1163 dev->n_pds_allocated++;
1164 spin_unlock(&dev->n_pds_lock);
818 1165
819 /* ib_alloc_pd() will initialize pd->ibpd. */ 1166 /* ib_alloc_pd() will initialize pd->ibpd. */
820 pd->user = udata != NULL; 1167 pd->user = udata != NULL;
@@ -830,7 +1177,9 @@ static int ipath_dealloc_pd(struct ib_pd *ibpd)
830 struct ipath_pd *pd = to_ipd(ibpd); 1177 struct ipath_pd *pd = to_ipd(ibpd);
831 struct ipath_ibdev *dev = to_idev(ibpd->device); 1178 struct ipath_ibdev *dev = to_idev(ibpd->device);
832 1179
1180 spin_lock(&dev->n_pds_lock);
833 dev->n_pds_allocated--; 1181 dev->n_pds_allocated--;
1182 spin_unlock(&dev->n_pds_lock);
834 1183
835 kfree(pd); 1184 kfree(pd);
836 1185
@@ -851,11 +1200,6 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
851 struct ib_ah *ret; 1200 struct ib_ah *ret;
852 struct ipath_ibdev *dev = to_idev(pd->device); 1201 struct ipath_ibdev *dev = to_idev(pd->device);
853 1202
854 if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
855 ret = ERR_PTR(-ENOMEM);
856 goto bail;
857 }
858
859 /* A multicast address requires a GRH (see ch. 8.4.1). */ 1203 /* A multicast address requires a GRH (see ch. 8.4.1). */
860 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && 1204 if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
861 ah_attr->dlid != IPATH_PERMISSIVE_LID && 1205 ah_attr->dlid != IPATH_PERMISSIVE_LID &&
@@ -881,7 +1225,16 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
881 goto bail; 1225 goto bail;
882 } 1226 }
883 1227
1228 spin_lock(&dev->n_ahs_lock);
1229 if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
1230 spin_unlock(&dev->n_ahs_lock);
1231 kfree(ah);
1232 ret = ERR_PTR(-ENOMEM);
1233 goto bail;
1234 }
1235
884 dev->n_ahs_allocated++; 1236 dev->n_ahs_allocated++;
1237 spin_unlock(&dev->n_ahs_lock);
885 1238
886 /* ib_create_ah() will initialize ah->ibah. */ 1239 /* ib_create_ah() will initialize ah->ibah. */
887 ah->attr = *ah_attr; 1240 ah->attr = *ah_attr;
@@ -903,7 +1256,9 @@ static int ipath_destroy_ah(struct ib_ah *ibah)
903 struct ipath_ibdev *dev = to_idev(ibah->device); 1256 struct ipath_ibdev *dev = to_idev(ibah->device);
904 struct ipath_ah *ah = to_iah(ibah); 1257 struct ipath_ah *ah = to_iah(ibah);
905 1258
1259 spin_lock(&dev->n_ahs_lock);
906 dev->n_ahs_allocated--; 1260 dev->n_ahs_allocated--;
1261 spin_unlock(&dev->n_ahs_lock);
907 1262
908 kfree(ah); 1263 kfree(ah);
909 1264
@@ -919,25 +1274,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
919 return 0; 1274 return 0;
920} 1275}
921 1276
1277/**
1278 * ipath_get_npkeys - return the size of the PKEY table for port 0
1279 * @dd: the infinipath device
1280 */
1281unsigned ipath_get_npkeys(struct ipath_devdata *dd)
1282{
1283 return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1284}
1285
1286/**
1287 * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
1288 * @dd: the infinipath device
1289 * @index: the PKEY index
1290 */
1291unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
1292{
1293 unsigned ret;
1294
1295 if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1296 ret = 0;
1297 else
1298 ret = dd->ipath_pd[0]->port_pkeys[index];
1299
1300 return ret;
1301}
1302
922static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1303static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
923 u16 *pkey) 1304 u16 *pkey)
924{ 1305{
925 struct ipath_ibdev *dev = to_idev(ibdev); 1306 struct ipath_ibdev *dev = to_idev(ibdev);
926 int ret; 1307 int ret;
927 1308
928 if (index >= ipath_layer_get_npkeys(dev->dd)) { 1309 if (index >= ipath_get_npkeys(dev->dd)) {
929 ret = -EINVAL; 1310 ret = -EINVAL;
930 goto bail; 1311 goto bail;
931 } 1312 }
932 1313
933 *pkey = ipath_layer_get_pkey(dev->dd, index); 1314 *pkey = ipath_get_pkey(dev->dd, index);
934 ret = 0; 1315 ret = 0;
935 1316
936bail: 1317bail:
937 return ret; 1318 return ret;
938} 1319}
939 1320
940
941/** 1321/**
942 * ipath_alloc_ucontext - allocate a ucontest 1322 * ipath_alloc_ucontext - allocate a ucontest
943 * @ibdev: the infiniband device 1323 * @ibdev: the infiniband device
@@ -970,26 +1350,91 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context)
970 1350
971static int ipath_verbs_register_sysfs(struct ib_device *dev); 1351static int ipath_verbs_register_sysfs(struct ib_device *dev);
972 1352
1353static void __verbs_timer(unsigned long arg)
1354{
1355 struct ipath_devdata *dd = (struct ipath_devdata *) arg;
1356
1357 /*
1358 * If port 0 receive packet interrupts are not available, or
1359 * can be missed, poll the receive queue
1360 */
1361 if (dd->ipath_flags & IPATH_POLL_RX_INTR)
1362 ipath_kreceive(dd);
1363
1364 /* Handle verbs layer timeouts. */
1365 ipath_ib_timer(dd->verbs_dev);
1366
1367 mod_timer(&dd->verbs_timer, jiffies + 1);
1368}
1369
1370static int enable_timer(struct ipath_devdata *dd)
1371{
1372 /*
1373 * Early chips had a design flaw where the chip and kernel idea
1374 * of the tail register don't always agree, and therefore we won't
1375 * get an interrupt on the next packet received.
1376 * If the board supports per packet receive interrupts, use it.
1377 * Otherwise, the timer function periodically checks for packets
1378 * to cover this case.
1379 * Either way, the timer is needed for verbs layer related
1380 * processing.
1381 */
1382 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1383 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1384 0x2074076542310ULL);
1385 /* Enable GPIO bit 2 interrupt */
1386 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1387 (u64) (1 << 2));
1388 }
1389
1390 init_timer(&dd->verbs_timer);
1391 dd->verbs_timer.function = __verbs_timer;
1392 dd->verbs_timer.data = (unsigned long)dd;
1393 dd->verbs_timer.expires = jiffies + 1;
1394 add_timer(&dd->verbs_timer);
1395
1396 return 0;
1397}
1398
1399static int disable_timer(struct ipath_devdata *dd)
1400{
1401 /* Disable GPIO bit 2 interrupt */
1402 if (dd->ipath_flags & IPATH_GPIO_INTR)
1403 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1404
1405 del_timer_sync(&dd->verbs_timer);
1406
1407 return 0;
1408}
1409
973/** 1410/**
974 * ipath_register_ib_device - register our device with the infiniband core 1411 * ipath_register_ib_device - register our device with the infiniband core
975 * @unit: the device number to register
976 * @dd: the device data structure 1412 * @dd: the device data structure
977 * Return the allocated ipath_ibdev pointer or NULL on error. 1413 * Return the allocated ipath_ibdev pointer or NULL on error.
978 */ 1414 */
979static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) 1415int ipath_register_ib_device(struct ipath_devdata *dd)
980{ 1416{
981 struct ipath_layer_counters cntrs; 1417 struct ipath_verbs_counters cntrs;
982 struct ipath_ibdev *idev; 1418 struct ipath_ibdev *idev;
983 struct ib_device *dev; 1419 struct ib_device *dev;
984 int ret; 1420 int ret;
985 1421
986 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); 1422 idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
987 if (idev == NULL) 1423 if (idev == NULL) {
1424 ret = -ENOMEM;
988 goto bail; 1425 goto bail;
1426 }
989 1427
990 dev = &idev->ibdev; 1428 dev = &idev->ibdev;
991 1429
992 /* Only need to initialize non-zero fields. */ 1430 /* Only need to initialize non-zero fields. */
1431 spin_lock_init(&idev->n_pds_lock);
1432 spin_lock_init(&idev->n_ahs_lock);
1433 spin_lock_init(&idev->n_cqs_lock);
1434 spin_lock_init(&idev->n_qps_lock);
1435 spin_lock_init(&idev->n_srqs_lock);
1436 spin_lock_init(&idev->n_mcast_grps_lock);
1437
993 spin_lock_init(&idev->qp_table.lock); 1438 spin_lock_init(&idev->qp_table.lock);
994 spin_lock_init(&idev->lk_table.lock); 1439 spin_lock_init(&idev->lk_table.lock);
995 idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); 1440 idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
@@ -1030,7 +1475,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
1030 idev->link_width_enabled = 3; /* 1x or 4x */ 1475 idev->link_width_enabled = 3; /* 1x or 4x */
1031 1476
1032 /* Snapshot current HW counters to "clear" them. */ 1477 /* Snapshot current HW counters to "clear" them. */
1033 ipath_layer_get_counters(dd, &cntrs); 1478 ipath_get_counters(dd, &cntrs);
1034 idev->z_symbol_error_counter = cntrs.symbol_error_counter; 1479 idev->z_symbol_error_counter = cntrs.symbol_error_counter;
1035 idev->z_link_error_recovery_counter = 1480 idev->z_link_error_recovery_counter =
1036 cntrs.link_error_recovery_counter; 1481 cntrs.link_error_recovery_counter;
@@ -1054,14 +1499,14 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
1054 * device types in the system, we can't be sure this is unique. 1499 * device types in the system, we can't be sure this is unique.
1055 */ 1500 */
1056 if (!sys_image_guid) 1501 if (!sys_image_guid)
1057 sys_image_guid = ipath_layer_get_guid(dd); 1502 sys_image_guid = dd->ipath_guid;
1058 idev->sys_image_guid = sys_image_guid; 1503 idev->sys_image_guid = sys_image_guid;
1059 idev->ib_unit = unit; 1504 idev->ib_unit = dd->ipath_unit;
1060 idev->dd = dd; 1505 idev->dd = dd;
1061 1506
1062 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); 1507 strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
1063 dev->owner = THIS_MODULE; 1508 dev->owner = THIS_MODULE;
1064 dev->node_guid = ipath_layer_get_guid(dd); 1509 dev->node_guid = dd->ipath_guid;
1065 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; 1510 dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
1066 dev->uverbs_cmd_mask = 1511 dev->uverbs_cmd_mask =
1067 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 1512 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1093,9 +1538,9 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
 		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
-	dev->node_type = IB_NODE_CA;
+	dev->node_type = RDMA_NODE_IB_CA;
 	dev->phys_port_cnt = 1;
-	dev->dma_device = ipath_layer_get_device(dd);
+	dev->dma_device = &dd->pcidev->dev;
 	dev->class_dev.dev = dev->dma_device;
 	dev->query_device = ipath_query_device;
 	dev->modify_device = ipath_modify_device;
@@ -1137,9 +1582,10 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 	dev->attach_mcast = ipath_multicast_attach;
 	dev->detach_mcast = ipath_multicast_detach;
 	dev->process_mad = ipath_process_mad;
+	dev->mmap = ipath_mmap;
 
 	snprintf(dev->node_desc, sizeof(dev->node_desc),
-		 IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename);
+		 IPATH_IDSTR " %s", system_utsname.nodename);
 
 	ret = ib_register_device(dev);
 	if (ret)
@@ -1148,7 +1594,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd)
 	if (ipath_verbs_register_sysfs(dev))
 		goto err_class;
 
-	ipath_layer_enable_timer(dd);
+	enable_timer(dd);
 
 	goto bail;
 
@@ -1160,37 +1606,32 @@ err_lk:
 	kfree(idev->qp_table.table);
 err_qp:
 	ib_dealloc_device(dev);
-	_VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n",
-		     unit, -ret);
+	ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
 	idev = NULL;
 
 bail:
-	return idev;
+	dd->verbs_dev = idev;
+	return ret;
 }
 
-static void ipath_unregister_ib_device(void *arg)
+void ipath_unregister_ib_device(struct ipath_ibdev *dev)
 {
-	struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
 	struct ib_device *ibdev = &dev->ibdev;
 
-	ipath_layer_disable_timer(dev->dd);
+	disable_timer(dev->dd);
 
 	ib_unregister_device(ibdev);
 
 	if (!list_empty(&dev->pending[0]) ||
 	    !list_empty(&dev->pending[1]) ||
 	    !list_empty(&dev->pending[2]))
-		_VERBS_ERROR("ipath%d pending list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "pending list not empty!\n");
 	if (!list_empty(&dev->piowait))
-		_VERBS_ERROR("ipath%d piowait list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "piowait list not empty!\n");
 	if (!list_empty(&dev->rnrwait))
-		_VERBS_ERROR("ipath%d rnrwait list not empty!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
 	if (!ipath_mcast_tree_empty())
-		_VERBS_ERROR("ipath%d multicast table memory leak!\n",
-			     dev->ib_unit);
+		ipath_dev_err(dev->dd, "multicast table memory leak!\n");
 	/*
 	 * Note that ipath_unregister_ib_device() can be called before all
 	 * the QPs are destroyed!
@@ -1201,25 +1642,12 @@ static void ipath_unregister_ib_device(void *arg)
 	ib_dealloc_device(ibdev);
 }
 
-static int __init ipath_verbs_init(void)
-{
-	return ipath_verbs_register(ipath_register_ib_device,
-				    ipath_unregister_ib_device,
-				    ipath_ib_piobufavail, ipath_ib_rcv,
-				    ipath_ib_timer);
-}
-
-static void __exit ipath_verbs_cleanup(void)
-{
-	ipath_verbs_unregister();
-}
-
 static ssize_t show_rev(struct class_device *cdev, char *buf)
 {
 	struct ipath_ibdev *dev =
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 
-	return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd));
+	return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
 }
 
 static ssize_t show_hca(struct class_device *cdev, char *buf)
@@ -1228,7 +1656,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 		container_of(cdev, struct ipath_ibdev, ibdev.class_dev);
 	int ret;
 
-	ret = ipath_layer_get_boardname(dev->dd, buf, 128);
+	ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
 	if (ret < 0)
 		goto bail;
 	strcat(buf, "\n");
@@ -1305,6 +1733,3 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
 bail:
 	return ret;
 }
-
-module_init(ipath_verbs_init);
-module_exit(ipath_verbs_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 2df684727dc1..09bbb3f9a217 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -38,10 +38,10 @@
 #include <linux/spinlock.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/kref.h>
 #include <rdma/ib_pack.h>
 
 #include "ipath_layer.h"
-#include "verbs_debug.h"
 
46#define QPN_MAX (1 << 24) 46#define QPN_MAX (1 << 24)
47#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) 47#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
@@ -50,7 +50,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IPATH_UVERBS_ABI_VERSION       1
+#define IPATH_UVERBS_ABI_VERSION       2
 
55/* 55/*
56 * Define an ib_cq_notify value that is not valid so we know when CQ 56 * Define an ib_cq_notify value that is not valid so we know when CQ
@@ -152,19 +152,6 @@ struct ipath_mcast {
 	int n_attached;
 };
 
-/* Memory region */
-struct ipath_mr {
-	struct ib_mr ibmr;
-	struct ipath_mregion mr;	/* must be last */
-};
-
-/* Fast memory region */
-struct ipath_fmr {
-	struct ib_fmr ibfmr;
-	u8 page_shift;
-	struct ipath_mregion mr;	/* must be last */
-};
-
 /* Protection domain */
 struct ipath_pd {
 	struct ib_pd ibpd;
@@ -178,58 +165,89 @@ struct ipath_ah {
 };
 
 /*
- * Quick description of our CQ/QP locking scheme:
- *
- * We have one global lock that protects dev->cq/qp_table.  Each
- * struct ipath_cq/qp also has its own lock.  An individual qp lock
- * may be taken inside of an individual cq lock.  Both cqs attached to
- * a qp may be locked, with the send cq locked first.  No other
- * nesting should be done.
- *
- * Each struct ipath_cq/qp also has an atomic_t ref count.  The
- * pointer from the cq/qp_table to the struct counts as one reference.
- * This reference also is good for access through the consumer API, so
- * modifying the CQ/QP etc doesn't need to take another reference.
- * Access because of a completion being polled does need a reference.
- *
- * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
- * destroy function to sleep on.
- *
- * This means that access from the consumer API requires nothing but
- * taking the struct's lock.
- *
- * Access because of a completion event should go as follows:
- * - lock cq/qp_table and look up struct
- * - increment ref count in struct
- * - drop cq/qp_table lock
- * - lock struct, do your thing, and unlock struct
- * - decrement ref count; if zero, wake up waiters
- *
- * To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
- * - decrement ref count
- * - wait_event until ref count is zero
- *
- * It is the consumer's responsibilty to make sure that no QP
- * operations (WQE posting or state modification) are pending when the
- * QP is destroyed.  Also, the consumer must make sure that calls to
- * qp_modify are serialized.
- *
- * Possible optimizations (wait for profile data to see if/where we
- * have locks bouncing between CPUs):
- * - split cq/qp table lock into n separate (cache-aligned) locks,
- *   indexed (say) by the page in the table
+ * This structure is used by ipath_mmap() to validate an offset
+ * when an mmap() request is made.  The vm_area_struct then uses
+ * this as its vm_private_data.
  */
+struct ipath_mmap_info {
+	struct ipath_mmap_info *next;
+	struct ib_ucontext *context;
+	void *obj;
+	struct kref ref;
+	unsigned size;
+	unsigned mmap_cnt;
+};
+
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct ipath_cq_wc {
+	u32 head;		/* index of next entry to fill */
+	u32 tail;		/* index of next ib_poll_cq() entry */
+	struct ib_wc queue[1];	/* this is actually size ibcq.cqe + 1 */
+};
 
+/*
+ * The completion queue structure.
+ */
 struct ipath_cq {
 	struct ib_cq ibcq;
 	struct tasklet_struct comptask;
 	spinlock_t lock;
 	u8 notify;
 	u8 triggered;
-	u32 head;		/* new records added to the head */
-	u32 tail;		/* poll_cq() reads from here. */
-	struct ib_wc *queue;	/* this is actually ibcq.cqe + 1 */
+	struct ipath_cq_wc *queue;
+	struct ipath_mmap_info *ip;
+};
+
+/*
+ * A segment is a linear region of low physical memory.
+ * XXX Maybe we should use phys addr here and kmap()/kunmap().
+ * Used by the verbs layer.
+ */
+struct ipath_seg {
+	void *vaddr;
+	size_t length;
+};
+
+/* The number of ipath_segs that fit in a page. */
+#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg))
+
+struct ipath_segarray {
+	struct ipath_seg segs[IPATH_SEGSZ];
+};
+
+struct ipath_mregion {
+	u64 user_base;		/* User's address for this region */
+	u64 iova;		/* IB start address of this region */
+	size_t length;
+	u32 lkey;
+	u32 offset;		/* offset (bytes) to start of region */
+	int access_flags;
+	u32 max_segs;		/* number of ipath_segs in all the arrays */
+	u32 mapsz;		/* size of the map array */
+	struct ipath_segarray *map[0];	/* the segments */
+};
+
+/*
+ * These keep track of the copy progress within a memory region.
+ * Used by the verbs layer.
+ */
+struct ipath_sge {
+	struct ipath_mregion *mr;
+	void *vaddr;		/* current pointer into the segment */
+	u32 sge_length;		/* length of the SGE */
+	u32 length;		/* remaining length of the segment */
+	u16 m;			/* current index: mr->map[m] */
+	u16 n;			/* current index: mr->map[m]->segs[n] */
+};
+
+/* Memory region */
+struct ipath_mr {
+	struct ib_mr ibmr;
+	struct ipath_mregion mr;	/* must be last */
 };
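The ipath_cq_wc layout above is what gets handed to user space via ipath_mmap(): a bare ring of ib_wc entries plus head and tail indices, sized ibcq.cqe + 1. A hedged sketch of how a consumer could drain it (the function and the simple wrap test are illustrative; this is not the driver's actual poll path):

static int drain_cq_ring(struct ipath_cq_wc *wc, u32 cqe,
			 struct ib_wc *out, int max)
{
	int n = 0;

	/* entries are valid while tail != head; the ring has cqe + 1 slots */
	while (n < max && wc->tail != wc->head) {
		out[n++] = wc->queue[wc->tail];
		if (wc->tail == cqe)	/* last slot reached, wrap to 0 */
			wc->tail = 0;
		else
			wc->tail++;
	}
	return n;
}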
234 252
235/* 253/*
@@ -248,32 +266,50 @@ struct ipath_swqe {
248 266
 /*
  * Receive work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->r_max_sge.
+ * The size of the sg_list is determined when the QP (or SRQ) is created
+ * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
  */
 struct ipath_rwqe {
 	u64 wr_id;
-	u32 length;		/* total length of data in sg_list */
 	u8 num_sge;
-	struct ipath_sge sg_list[0];
+	struct ib_sge sg_list[0];
 };
 
-struct ipath_rq {
-	spinlock_t lock;
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and receive work queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ * Note that the wq array elements are variable size so you can't
+ * just index into the array to get the N'th element;
+ * use get_rwqe_ptr() instead.
+ */
+struct ipath_rwq {
 	u32 head;		/* new work requests posted to the head */
 	u32 tail;		/* receives pull requests from here. */
+	struct ipath_rwqe wq[0];
+};
+
+struct ipath_rq {
+	struct ipath_rwq *wq;
+	spinlock_t lock;
 	u32 size;		/* size of RWQE array */
 	u8 max_sge;
-	struct ipath_rwqe *wq;	/* RWQE array */
 };
269 298
270struct ipath_srq { 299struct ipath_srq {
271 struct ib_srq ibsrq; 300 struct ib_srq ibsrq;
272 struct ipath_rq rq; 301 struct ipath_rq rq;
302 struct ipath_mmap_info *ip;
273 /* send signal when number of RWQEs < limit */ 303 /* send signal when number of RWQEs < limit */
274 u32 limit; 304 u32 limit;
275}; 305};
276 306
307struct ipath_sge_state {
308 struct ipath_sge *sg_list; /* next SGE to be used if any */
309 struct ipath_sge sge; /* progress state for the current SGE */
310 u8 num_sge;
311};
312
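The (m, n) pair in ipath_sge indexes the two-level map of ipath_segarray pages declared above. A rough sketch of advancing to the next segment once the current one is exhausted (an assumption: this mirrors the shape of the driver's copy loop but is not taken from it):

static void sge_next_segment(struct ipath_sge *sge)
{
	if (++sge->n >= IPATH_SEGSZ) {	/* move to the next map page */
		sge->m++;
		sge->n = 0;
	}
	sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
	sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
}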
277/* 313/*
278 * Variables prefixed with s_ are for the requester (sender). 314 * Variables prefixed with s_ are for the requester (sender).
279 * Variables prefixed with r_ are for the responder (receiver). 315 * Variables prefixed with r_ are for the responder (receiver).
@@ -293,6 +329,7 @@ struct ipath_qp {
293 atomic_t refcount; 329 atomic_t refcount;
294 wait_queue_head_t wait; 330 wait_queue_head_t wait;
295 struct tasklet_struct s_task; 331 struct tasklet_struct s_task;
332 struct ipath_mmap_info *ip;
296 struct ipath_sge_state *s_cur_sge; 333 struct ipath_sge_state *s_cur_sge;
297 struct ipath_sge_state s_sge; /* current send request data */ 334 struct ipath_sge_state s_sge; /* current send request data */
298 /* current RDMA read send data */ 335 /* current RDMA read send data */
@@ -334,6 +371,7 @@ struct ipath_qp {
334 u8 s_retry; /* requester retry counter */ 371 u8 s_retry; /* requester retry counter */
335 u8 s_rnr_retry; /* requester RNR retry counter */ 372 u8 s_rnr_retry; /* requester RNR retry counter */
336 u8 s_pkey_index; /* PKEY index to use */ 373 u8 s_pkey_index; /* PKEY index to use */
374 u8 timeout; /* Timeout for this QP */
337 enum ib_mtu path_mtu; 375 enum ib_mtu path_mtu;
338 u32 remote_qpn; 376 u32 remote_qpn;
339 u32 qkey; /* QKEY for this QP (for UD or RD) */ 377 u32 qkey; /* QKEY for this QP (for UD or RD) */
@@ -345,7 +383,8 @@ struct ipath_qp {
345 u32 s_ssn; /* SSN of tail entry */ 383 u32 s_ssn; /* SSN of tail entry */
346 u32 s_lsn; /* limit sequence number (credit) */ 384 u32 s_lsn; /* limit sequence number (credit) */
347 struct ipath_swqe *s_wq; /* send work queue */ 385 struct ipath_swqe *s_wq; /* send work queue */
348 struct ipath_rq r_rq; /* receive work queue */ 386 struct ipath_rq r_rq; /* receive work queue */
387 struct ipath_sge r_sg_list[0]; /* verified SGEs */
349}; 388};
350 389
351/* 390/*
@@ -369,15 +408,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
369 408
 /*
  * Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rq.wq.  This function does the array index computation.
+ * struct ipath_rwq.wq.  This function does the array index computation.
  */
 static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
 					      unsigned n)
 {
 	return (struct ipath_rwqe *)
-		((char *) rq->wq +
+		((char *) rq->wq->wq +
 		 (sizeof(struct ipath_rwqe) +
-		  rq->max_sge * sizeof(struct ipath_sge)) * n);
+		  rq->max_sge * sizeof(struct ib_sge)) * n);
 }
382 421
383/* 422/*
@@ -417,6 +456,7 @@ struct ipath_ibdev {
417 struct ib_device ibdev; 456 struct ib_device ibdev;
418 struct list_head dev_list; 457 struct list_head dev_list;
419 struct ipath_devdata *dd; 458 struct ipath_devdata *dd;
459 struct ipath_mmap_info *pending_mmaps;
420 int ib_unit; /* This is the device number */ 460 int ib_unit; /* This is the device number */
421 u16 sm_lid; /* in host order */ 461 u16 sm_lid; /* in host order */
422 u8 sm_sl; 462 u8 sm_sl;
@@ -435,11 +475,20 @@ struct ipath_ibdev {
435 __be64 sys_image_guid; /* in network order */ 475 __be64 sys_image_guid; /* in network order */
436 __be64 gid_prefix; /* in network order */ 476 __be64 gid_prefix; /* in network order */
437 __be64 mkey; 477 __be64 mkey;
478
438 u32 n_pds_allocated; /* number of PDs allocated for device */ 479 u32 n_pds_allocated; /* number of PDs allocated for device */
480 spinlock_t n_pds_lock;
439 u32 n_ahs_allocated; /* number of AHs allocated for device */ 481 u32 n_ahs_allocated; /* number of AHs allocated for device */
482 spinlock_t n_ahs_lock;
440 u32 n_cqs_allocated; /* number of CQs allocated for device */ 483 u32 n_cqs_allocated; /* number of CQs allocated for device */
484 spinlock_t n_cqs_lock;
485 u32 n_qps_allocated; /* number of QPs allocated for device */
486 spinlock_t n_qps_lock;
441 u32 n_srqs_allocated; /* number of SRQs allocated for device */ 487 u32 n_srqs_allocated; /* number of SRQs allocated for device */
488 spinlock_t n_srqs_lock;
442 u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ 489 u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
490 spinlock_t n_mcast_grps_lock;
491
443 u64 ipath_sword; /* total dwords sent (sample result) */ 492 u64 ipath_sword; /* total dwords sent (sample result) */
444 u64 ipath_rword; /* total dwords received (sample result) */ 493 u64 ipath_rword; /* total dwords received (sample result) */
445 u64 ipath_spkts; /* total packets sent (sample result) */ 494 u64 ipath_spkts; /* total packets sent (sample result) */
@@ -494,8 +543,19 @@ struct ipath_ibdev {
494 struct ipath_opcode_stats opstats[128]; 543 struct ipath_opcode_stats opstats[128];
495}; 544};
496 545
-struct ipath_ucontext {
-	struct ib_ucontext ibucontext;
+struct ipath_verbs_counters {
+	u64 symbol_error_counter;
+	u64 link_error_recovery_counter;
+	u64 link_downed_counter;
+	u64 port_rcv_errors;
+	u64 port_rcv_remphys_errors;
+	u64 port_xmit_discards;
+	u64 port_xmit_data;
+	u64 port_rcv_data;
+	u64 port_xmit_packets;
+	u64 port_rcv_packets;
+	u32 local_link_integrity_errors;
+	u32 excessive_buffer_overrun_errors;
 };
500 560
501static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) 561static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
@@ -503,11 +563,6 @@ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
503 return container_of(ibmr, struct ipath_mr, ibmr); 563 return container_of(ibmr, struct ipath_mr, ibmr);
504} 564}
505 565
506static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
507{
508 return container_of(ibfmr, struct ipath_fmr, ibfmr);
509}
510
511static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd) 566static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
512{ 567{
513 return container_of(ibpd, struct ipath_pd, ibpd); 568 return container_of(ibpd, struct ipath_pd, ibpd);
@@ -545,12 +600,6 @@ int ipath_process_mad(struct ib_device *ibdev,
545 struct ib_grh *in_grh, 600 struct ib_grh *in_grh,
546 struct ib_mad *in_mad, struct ib_mad *out_mad); 601 struct ib_mad *in_mad, struct ib_mad *out_mad);
547 602
548static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
549 *ibucontext)
550{
551 return container_of(ibucontext, struct ipath_ucontext, ibucontext);
552}
553
554/* 603/*
555 * Compare the lower 24 bits of the two values. 604 * Compare the lower 24 bits of the two values.
556 * Returns an integer <, ==, or > than zero. 605 * Returns an integer <, ==, or > than zero.
@@ -562,6 +611,13 @@ static inline int ipath_cmp24(u32 a, u32 b)
562 611
563struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid); 612struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
564 613
614int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
615 u64 *rwords, u64 *spkts, u64 *rpkts,
616 u64 *xmit_wait);
617
618int ipath_get_counters(struct ipath_devdata *dd,
619 struct ipath_verbs_counters *cntrs);
620
565int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 621int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
566 622
567int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 623int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
@@ -579,7 +635,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
579int ipath_destroy_qp(struct ib_qp *ibqp); 635int ipath_destroy_qp(struct ib_qp *ibqp);
580 636
581int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 637int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
582 int attr_mask); 638 int attr_mask, struct ib_udata *udata);
583 639
584int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 640int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
585 int attr_mask, struct ib_qp_init_attr *init_attr); 641 int attr_mask, struct ib_qp_init_attr *init_attr);
@@ -592,6 +648,9 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
592 648
593void ipath_get_credit(struct ipath_qp *qp, u32 aeth); 649void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
594 650
651int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
652 u32 *hdr, u32 len, struct ipath_sge_state *ss);
653
595void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); 654void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
596 655
597int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, 656int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
@@ -638,7 +697,8 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
638 struct ib_udata *udata); 697 struct ib_udata *udata);
639 698
640int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 699int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
641 enum ib_srq_attr_mask attr_mask); 700 enum ib_srq_attr_mask attr_mask,
701 struct ib_udata *udata);
642 702
643int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); 703int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
644 704
@@ -680,6 +740,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list);
680 740
681int ipath_dealloc_fmr(struct ib_fmr *ibfmr); 741int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
682 742
743void ipath_release_mmap_info(struct kref *ref);
744
745int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
746
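Only the two prototypes above are exposed here; the patch does not show how ipath_mmap() matches an mmap offset to a pending ipath_mmap_info. One plausible shape for that lookup, purely hypothetical and built only from fields added in this patch:

/* Hypothetical sketch: walk dev->pending_mmaps for an entry owned by
 * this ucontext with the requested size; the real matching rule and
 * locking used by ipath_mmap() are not part of this diff. */
static struct ipath_mmap_info *find_pending_mmap(struct ipath_ibdev *dev,
						 struct ib_ucontext *context,
						 unsigned size)
{
	struct ipath_mmap_info *ip;

	for (ip = dev->pending_mmaps; ip; ip = ip->next)
		if (ip->context == context && ip->size == size)
			return ip;
	return NULL;
}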
683void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); 747void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev);
684 748
685void ipath_insert_rnr_queue(struct ipath_qp *qp); 749void ipath_insert_rnr_queue(struct ipath_qp *qp);
@@ -700,6 +764,22 @@ int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
700int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, 764int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
701 u32 pmtu, u32 *bth0p, u32 *bth2p); 765 u32 pmtu, u32 *bth0p, u32 *bth2p);
702 766
767int ipath_register_ib_device(struct ipath_devdata *);
768
769void ipath_unregister_ib_device(struct ipath_ibdev *);
770
771void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
772
773int ipath_ib_piobufavail(struct ipath_ibdev *);
774
775void ipath_ib_timer(struct ipath_ibdev *);
776
777unsigned ipath_get_npkeys(struct ipath_devdata *);
778
779u32 ipath_get_cr_errpkey(struct ipath_devdata *);
780
781unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
782
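With the old ipath_verbs_register()/module_init() indirection removed in ipath_verbs.c, these entry points are meant to be called directly by the core ipath driver. A hedged sketch of that call sequence (the wrapper functions are hypothetical; only the two ipath_*_ib_device calls come from this patch):

static int example_bringup(struct ipath_devdata *dd)
{
	/* registers the verbs device and stores it in dd->verbs_dev */
	return ipath_register_ib_device(dd);
}

static void example_teardown(struct ipath_devdata *dd)
{
	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);
}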
703extern const enum ib_wc_opcode ib_ipath_wc_opcode[]; 783extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
704 784
705extern const u8 ipath_cvt_physportstate[]; 785extern const u8 ipath_cvt_physportstate[];
@@ -714,6 +794,8 @@ extern unsigned int ib_ipath_max_cqs;
714 794
715extern unsigned int ib_ipath_max_qp_wrs; 795extern unsigned int ib_ipath_max_qp_wrs;
716 796
797extern unsigned int ib_ipath_max_qps;
798
717extern unsigned int ib_ipath_max_sges; 799extern unsigned int ib_ipath_max_sges;
718 800
719extern unsigned int ib_ipath_max_mcast_grps; 801extern unsigned int ib_ipath_max_mcast_grps;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index ee0e1d96d723..085e28b939ec 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -207,12 +207,17 @@ static int ipath_mcast_add(struct ipath_ibdev *dev,
 		goto bail;
 	}
 
+	spin_lock(&dev->n_mcast_grps_lock);
 	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
+		spin_unlock(&dev->n_mcast_grps_lock);
 		ret = ENOMEM;
 		goto bail;
 	}
 
 	dev->n_mcast_grps_allocated++;
+	spin_unlock(&dev->n_mcast_grps_lock);
+
+	mcast->n_attached++;
 
 	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
 
@@ -343,7 +348,9 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		atomic_dec(&mcast->refcount);
 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
 		ipath_mcast_free(mcast);
+		spin_lock(&dev->n_mcast_grps_lock);
 		dev->n_mcast_grps_allocated--;
+		spin_unlock(&dev->n_mcast_grps_lock);
 	}
 
 	ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
new file mode 100644
index 000000000000..036fde662aa9
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/*
34 * This file is conditionally built on PowerPC only. Otherwise weak symbol
35 * versions of the functions exported from here are used.
36 */
37
38#include "ipath_kernel.h"
39
40/**
41 * ipath_unordered_wc - indicate whether write combining is ordered
42 *
43 * PowerPC systems (at least those in the 970 processor family)
44 * write partially filled store buffers in address order, but will write
45 * completely filled store buffers in "random" order, and therefore must
46 * have serialization for correctness with current InfiniPath chips.
47 *
48 */
49int ipath_unordered_wc(void)
50{
51 return 1;
52}
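A caller is expected to consult ipath_unordered_wc() before relying on store ordering in a write-combining PIO copy. A hedged example of such a caller (the helper and the mb() choice are assumptions for illustration, not code from the driver):

static void pio_copy_last_word(u32 __iomem *piobuf, unsigned i, u32 last)
{
	if (ipath_unordered_wc())
		mb();	/* force earlier WC stores out before the final word */
	writel(last, piobuf + i);
}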
diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h
deleted file mode 100644
index 6186676f2a16..000000000000
--- a/drivers/infiniband/hw/ipath/verbs_debug.h
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef _VERBS_DEBUG_H
35#define _VERBS_DEBUG_H
36
37/*
38 * This file contains tracing code for the ib_ipath kernel module.
39 */
40#ifndef _VERBS_DEBUGGING /* tracing enabled or not */
41#define _VERBS_DEBUGGING 1
42#endif
43
44extern unsigned ib_ipath_debug;
45
46#define _VERBS_ERROR(fmt,...) \
47 do { \
48 printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
49 } while(0)
50
51#define _VERBS_UNIT_ERROR(unit,fmt,...) \
52 do { \
53 printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \
54 } while(0)
55
56#if _VERBS_DEBUGGING
57
58/*
59 * Mask values for debugging. The scheme allows us to compile out any
60 * of the debug tracing stuff, and if compiled in, to enable or
61 * disable dynamically.
62 * This can be set at modprobe time also:
63 * modprobe ib_path ib_ipath_debug=3
64 */
65
66#define __VERBS_INFO 0x1 /* generic low verbosity stuff */
67#define __VERBS_DBG 0x2 /* generic debug */
68#define __VERBS_VDBG 0x4 /* verbose debug */
69#define __VERBS_SMADBG 0x8000 /* sma packet debug */
70
71#define _VERBS_INFO(fmt,...) \
72 do { \
73 if (unlikely(ib_ipath_debug&__VERBS_INFO)) \
74 printk(KERN_INFO "%s: " fmt,"ib_ipath", \
75 ##__VA_ARGS__); \
76 } while(0)
77
78#define _VERBS_DBG(fmt,...) \
79 do { \
80 if (unlikely(ib_ipath_debug&__VERBS_DBG)) \
81 printk(KERN_DEBUG "%s: " fmt, __func__, \
82 ##__VA_ARGS__); \
83 } while(0)
84
85#define _VERBS_VDBG(fmt,...) \
86 do { \
87 if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \
88 printk(KERN_DEBUG "%s: " fmt, __func__, \
89 ##__VA_ARGS__); \
90 } while(0)
91
92#define _VERBS_SMADBG(fmt,...) \
93 do { \
94 if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \
95 printk(KERN_DEBUG "%s: " fmt, __func__, \
96 ##__VA_ARGS__); \
97 } while(0)
98
99#else /* ! _VERBS_DEBUGGING */
100
101#define _VERBS_INFO(fmt,...)
102#define _VERBS_DBG(fmt,...)
103#define _VERBS_VDBG(fmt,...)
104#define _VERBS_SMADBG(fmt,...)
105
106#endif /* _VERBS_DEBUGGING */
107
108#endif /* _VERBS_DEBUG_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index e215041b2db9..69599455aca2 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -90,7 +90,7 @@ static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
90 case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS; 90 case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS;
91 case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS; 91 case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
92 case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS; 92 case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS;
93 default: return port_rate; 93 default: return mult_to_ib_rate(port_rate);
94 } 94 }
95} 95}
96 96
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index c3bec7490f52..cd044ea2dfa4 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -34,6 +34,7 @@
34 34
35#include <linux/jiffies.h> 35#include <linux/jiffies.h>
36#include <linux/timer.h> 36#include <linux/timer.h>
37#include <linux/workqueue.h>
37 38
38#include "mthca_dev.h" 39#include "mthca_dev.h"
39 40
@@ -48,9 +49,41 @@ enum {
48 49
49static DEFINE_SPINLOCK(catas_lock); 50static DEFINE_SPINLOCK(catas_lock);
50 51
52static LIST_HEAD(catas_list);
53static struct workqueue_struct *catas_wq;
54static struct work_struct catas_work;
55
56static int catas_reset_disable;
57module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
58MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
59
60static void catas_reset(void *work_ptr)
61{
62 struct mthca_dev *dev, *tmpdev;
63 LIST_HEAD(tlist);
64 int ret;
65
66 mutex_lock(&mthca_device_mutex);
67
68 spin_lock_irq(&catas_lock);
69 list_splice_init(&catas_list, &tlist);
70 spin_unlock_irq(&catas_lock);
71
72 list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
73 ret = __mthca_restart_one(dev->pdev);
74 if (ret)
75 mthca_err(dev, "Reset failed (%d)\n", ret);
76 else
77 mthca_dbg(dev, "Reset succeeded\n");
78 }
79
80 mutex_unlock(&mthca_device_mutex);
81}
82
51static void handle_catas(struct mthca_dev *dev) 83static void handle_catas(struct mthca_dev *dev)
52{ 84{
53 struct ib_event event; 85 struct ib_event event;
86 unsigned long flags;
54 const char *type; 87 const char *type;
55 int i; 88 int i;
56 89
@@ -82,6 +115,14 @@ static void handle_catas(struct mthca_dev *dev)
82 for (i = 0; i < dev->catas_err.size; ++i) 115 for (i = 0; i < dev->catas_err.size; ++i)
83 mthca_err(dev, " buf[%02x]: %08x\n", 116 mthca_err(dev, " buf[%02x]: %08x\n",
84 i, swab32(readl(dev->catas_err.map + i))); 117 i, swab32(readl(dev->catas_err.map + i)));
118
119 if (catas_reset_disable)
120 return;
121
122 spin_lock_irqsave(&catas_lock, flags);
123 list_add(&dev->catas_err.list, &catas_list);
124 queue_work(catas_wq, &catas_work);
125 spin_unlock_irqrestore(&catas_lock, flags);
85} 126}
86 127
87static void poll_catas(unsigned long dev_ptr) 128static void poll_catas(unsigned long dev_ptr)
@@ -135,6 +176,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
135 dev->catas_err.timer.data = (unsigned long) dev; 176 dev->catas_err.timer.data = (unsigned long) dev;
136 dev->catas_err.timer.function = poll_catas; 177 dev->catas_err.timer.function = poll_catas;
137 dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL; 178 dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
179 INIT_LIST_HEAD(&dev->catas_err.list);
138 add_timer(&dev->catas_err.timer); 180 add_timer(&dev->catas_err.timer);
139} 181}
140 182
@@ -153,4 +195,24 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
153 dev->catas_err.addr), 195 dev->catas_err.addr),
154 dev->catas_err.size * 4); 196 dev->catas_err.size * 4);
155 } 197 }
198
199 spin_lock_irq(&catas_lock);
200 list_del(&dev->catas_err.list);
201 spin_unlock_irq(&catas_lock);
202}
203
204int __init mthca_catas_init(void)
205{
206 INIT_WORK(&catas_work, catas_reset, NULL);
207
208 catas_wq = create_singlethread_workqueue("mthca_catas");
209 if (!catas_wq)
210 return -ENOMEM;
211
212 return 0;
213}
214
215void mthca_catas_cleanup(void)
216{
217 destroy_workqueue(catas_wq);
156} 218}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index deabc14b4ea4..99a94d710935 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -34,7 +34,7 @@
34 * $Id: mthca_cmd.c 1349 2004-12-16 21:09:43Z roland $ 34 * $Id: mthca_cmd.c 1349 2004-12-16 21:09:43Z roland $
35 */ 35 */
36 36
37#include <linux/sched.h> 37#include <linux/completion.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/errno.h> 39#include <linux/errno.h>
40#include <asm/io.h> 40#include <asm/io.h>
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 3e27a084257e..e393681ba7d4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -544,11 +544,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
544 wq = &(*cur_qp)->rq; 544 wq = &(*cur_qp)->rq;
545 wqe = be32_to_cpu(cqe->wqe); 545 wqe = be32_to_cpu(cqe->wqe);
546 wqe_index = wqe >> wq->wqe_shift; 546 wqe_index = wqe >> wq->wqe_shift;
547 /* 547 /*
548 * WQE addr == base - 1 might be reported in receive completion 548 * WQE addr == base - 1 might be reported in receive completion
549 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and 549 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
550 * Arbel FW 5.1.400. This bug should be fixed in later FW revs. 550 * Arbel FW 5.1.400. This bug should be fixed in later FW revs.
551 */ 551 */
552 if (unlikely(wqe_index < 0)) 552 if (unlikely(wqe_index < 0))
553 wqe_index = wq->max - 1; 553 wqe_index = wq->max - 1;
554 entry->wr_id = (*cur_qp)->wrid[wqe_index]; 554 entry->wr_id = (*cur_qp)->wrid[wqe_index];
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index f8160b8de090..fe5cecf70fed 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -45,6 +45,7 @@
45#include <linux/dma-mapping.h> 45#include <linux/dma-mapping.h>
46#include <linux/timer.h> 46#include <linux/timer.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/list.h>
48 49
49#include <asm/semaphore.h> 50#include <asm/semaphore.h>
50 51
@@ -283,8 +284,11 @@ struct mthca_catas_err {
283 unsigned long stop; 284 unsigned long stop;
284 u32 size; 285 u32 size;
285 struct timer_list timer; 286 struct timer_list timer;
287 struct list_head list;
286}; 288};
287 289
290extern struct mutex mthca_device_mutex;
291
288struct mthca_dev { 292struct mthca_dev {
289 struct ib_device ib_dev; 293 struct ib_device ib_dev;
290 struct pci_dev *pdev; 294 struct pci_dev *pdev;
@@ -450,6 +454,9 @@ void mthca_unregister_device(struct mthca_dev *dev);
450 454
451void mthca_start_catas_poll(struct mthca_dev *dev); 455void mthca_start_catas_poll(struct mthca_dev *dev);
452void mthca_stop_catas_poll(struct mthca_dev *dev); 456void mthca_stop_catas_poll(struct mthca_dev *dev);
457int __mthca_restart_one(struct pci_dev *pdev);
458int mthca_catas_init(void);
459void mthca_catas_cleanup(void);
453 460
454int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar); 461int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
455void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar); 462void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
@@ -506,7 +513,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
506 struct ib_srq_attr *attr, struct mthca_srq *srq); 513 struct ib_srq_attr *attr, struct mthca_srq *srq);
507void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); 514void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
508int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 515int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
509 enum ib_srq_attr_mask attr_mask); 516 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
510int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 517int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
511int mthca_max_srq_sge(struct mthca_dev *dev); 518int mthca_max_srq_sge(struct mthca_dev *dev);
512void mthca_srq_event(struct mthca_dev *dev, u32 srqn, 519void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
@@ -521,7 +528,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
521 enum ib_event_type event_type); 528 enum ib_event_type event_type);
522int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, 529int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
523 struct ib_qp_init_attr *qp_init_attr); 530 struct ib_qp_init_attr *qp_init_attr);
524int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask); 531int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
532 struct ib_udata *udata);
525int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 533int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
526 struct ib_send_wr **bad_wr); 534 struct ib_send_wr **bad_wr);
527int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 535int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index d9bc030bcccc..45e106f14807 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -119,7 +119,7 @@ static void smp_snoop(struct ib_device *ibdev,
119 119
120 mthca_update_rate(to_mdev(ibdev), port_num); 120 mthca_update_rate(to_mdev(ibdev), port_num);
121 update_sm_ah(to_mdev(ibdev), port_num, 121 update_sm_ah(to_mdev(ibdev), port_num,
122 be16_to_cpu(pinfo->lid), 122 be16_to_cpu(pinfo->sm_lid),
123 pinfo->neighbormtu_mastersmsl & 0xf); 123 pinfo->neighbormtu_mastersmsl & 0xf);
124 124
125 event.device = ibdev; 125 event.device = ibdev;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 7b82c1907f04..47ea02148368 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -80,6 +80,8 @@ static int tune_pci = 0;
80module_param(tune_pci, int, 0444); 80module_param(tune_pci, int, 0444);
81MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero"); 81MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
82 82
83struct mutex mthca_device_mutex;
84
83static const char mthca_version[] __devinitdata = 85static const char mthca_version[] __devinitdata =
84 DRV_NAME ": Mellanox InfiniBand HCA driver v" 86 DRV_NAME ": Mellanox InfiniBand HCA driver v"
85 DRV_VERSION " (" DRV_RELDATE ")\n"; 87 DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -978,28 +980,15 @@ static struct {
978 MTHCA_FLAG_SINAI_OPT } 980 MTHCA_FLAG_SINAI_OPT }
979}; 981};
980 982
981static int __devinit mthca_init_one(struct pci_dev *pdev, 983static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
982 const struct pci_device_id *id)
983{ 984{
984 static int mthca_version_printed = 0;
985 int ddr_hidden = 0; 985 int ddr_hidden = 0;
986 int err; 986 int err;
987 struct mthca_dev *mdev; 987 struct mthca_dev *mdev;
988 988
989 if (!mthca_version_printed) {
990 printk(KERN_INFO "%s", mthca_version);
991 ++mthca_version_printed;
992 }
993
994 printk(KERN_INFO PFX "Initializing %s\n", 989 printk(KERN_INFO PFX "Initializing %s\n",
995 pci_name(pdev)); 990 pci_name(pdev));
996 991
997 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
998 printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
999 pci_name(pdev), id->driver_data);
1000 return -ENODEV;
1001 }
1002
1003 err = pci_enable_device(pdev); 992 err = pci_enable_device(pdev);
1004 if (err) { 993 if (err) {
1005 dev_err(&pdev->dev, "Cannot enable PCI device, " 994 dev_err(&pdev->dev, "Cannot enable PCI device, "
@@ -1065,7 +1054,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
1065 1054
1066 mdev->pdev = pdev; 1055 mdev->pdev = pdev;
1067 1056
1068 mdev->mthca_flags = mthca_hca_table[id->driver_data].flags; 1057 mdev->mthca_flags = mthca_hca_table[hca_type].flags;
1069 if (ddr_hidden) 1058 if (ddr_hidden)
1070 mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; 1059 mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
1071 1060
@@ -1099,13 +1088,13 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
1099 if (err) 1088 if (err)
1100 goto err_cmd; 1089 goto err_cmd;
1101 1090
1102 if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) { 1091 if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
1103 mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n", 1092 mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
1104 (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, 1093 (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
1105 (int) (mdev->fw_ver & 0xffff), 1094 (int) (mdev->fw_ver & 0xffff),
1106 (int) (mthca_hca_table[id->driver_data].latest_fw >> 32), 1095 (int) (mthca_hca_table[hca_type].latest_fw >> 32),
1107 (int) (mthca_hca_table[id->driver_data].latest_fw >> 16) & 0xffff, 1096 (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
1108 (int) (mthca_hca_table[id->driver_data].latest_fw & 0xffff)); 1097 (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
1109 mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); 1098 mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
1110 } 1099 }
1111 1100
@@ -1122,6 +1111,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
1122 goto err_unregister; 1111 goto err_unregister;
1123 1112
1124 pci_set_drvdata(pdev, mdev); 1113 pci_set_drvdata(pdev, mdev);
1114 mdev->hca_type = hca_type;
1125 1115
1126 return 0; 1116 return 0;
1127 1117
@@ -1166,7 +1156,7 @@ err_disable_pdev:
1166 return err; 1156 return err;
1167} 1157}
1168 1158
1169static void __devexit mthca_remove_one(struct pci_dev *pdev) 1159static void __mthca_remove_one(struct pci_dev *pdev)
1170{ 1160{
1171 struct mthca_dev *mdev = pci_get_drvdata(pdev); 1161 struct mthca_dev *mdev = pci_get_drvdata(pdev);
1172 u8 status; 1162 u8 status;
@@ -1211,6 +1201,51 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev)
1211 } 1201 }
1212} 1202}
1213 1203
1204int __mthca_restart_one(struct pci_dev *pdev)
1205{
1206 struct mthca_dev *mdev;
1207
1208 mdev = pci_get_drvdata(pdev);
1209 if (!mdev)
1210 return -ENODEV;
1211 __mthca_remove_one(pdev);
1212 return __mthca_init_one(pdev, mdev->hca_type);
1213}
1214
1215static int __devinit mthca_init_one(struct pci_dev *pdev,
1216 const struct pci_device_id *id)
1217{
1218 static int mthca_version_printed = 0;
1219 int ret;
1220
1221 mutex_lock(&mthca_device_mutex);
1222
1223 if (!mthca_version_printed) {
1224 printk(KERN_INFO "%s", mthca_version);
1225 ++mthca_version_printed;
1226 }
1227
1228 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
1229 printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
1230 pci_name(pdev), id->driver_data);
1231 mutex_unlock(&mthca_device_mutex);
1232 return -ENODEV;
1233 }
1234
1235 ret = __mthca_init_one(pdev, id->driver_data);
1236
1237 mutex_unlock(&mthca_device_mutex);
1238
1239 return ret;
1240}
1241
1242static void __devexit mthca_remove_one(struct pci_dev *pdev)
1243{
1244 mutex_lock(&mthca_device_mutex);
1245 __mthca_remove_one(pdev);
1246 mutex_unlock(&mthca_device_mutex);
1247}
1248
1214static struct pci_device_id mthca_pci_table[] = { 1249static struct pci_device_id mthca_pci_table[] = {
1215 { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR), 1250 { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
1216 .driver_data = TAVOR }, 1251 .driver_data = TAVOR },
@@ -1248,13 +1283,24 @@ static int __init mthca_init(void)
1248{ 1283{
1249 int ret; 1284 int ret;
1250 1285
1286 mutex_init(&mthca_device_mutex);
1287 ret = mthca_catas_init();
1288 if (ret)
1289 return ret;
1290
1251 ret = pci_register_driver(&mthca_driver); 1291 ret = pci_register_driver(&mthca_driver);
1252 return ret < 0 ? ret : 0; 1292 if (ret < 0) {
1293 mthca_catas_cleanup();
1294 return ret;
1295 }
1296
1297 return 0;
1253} 1298}
1254 1299
1255static void __exit mthca_cleanup(void) 1300static void __exit mthca_cleanup(void)
1256{ 1301{
1257 pci_unregister_driver(&mthca_driver); 1302 pci_unregister_driver(&mthca_driver);
1303 mthca_catas_cleanup();
1258} 1304}
1259 1305
1260module_init(mthca_init); 1306module_init(mthca_init);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 265b1d1c4a62..981fe2eebdfa 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1288,7 +1288,7 @@ int mthca_register_device(struct mthca_dev *dev)
1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); 1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
1291 dev->ib_dev.node_type = IB_NODE_CA; 1291 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
1292 dev->ib_dev.phys_port_cnt = dev->limits.num_ports; 1292 dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
1293 dev->ib_dev.dma_device = &dev->pdev->dev; 1293 dev->ib_dev.dma_device = &dev->pdev->dev;
1294 dev->ib_dev.class_dev.dev = &dev->pdev->dev; 1294 dev->ib_dev.class_dev.dev = &dev->pdev->dev;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 2e8f6f36e0a5..5e5c58b9920b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -408,7 +408,7 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
408 ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; 408 ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
409 ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; 409 ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
410 ib_ah_attr->static_rate = mthca_rate_to_ib(dev, 410 ib_ah_attr->static_rate = mthca_rate_to_ib(dev,
411 path->static_rate & 0x7, 411 path->static_rate & 0xf,
412 ib_ah_attr->port_num); 412 ib_ah_attr->port_num);
413 ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0; 413 ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
414 if (ib_ah_attr->ah_flags) { 414 if (ib_ah_attr->ah_flags) {
@@ -472,10 +472,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 	if (qp->transport == RC || qp->transport == UC) {
 		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
 		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+		qp_attr->alt_pkey_index =
+			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
 	}
 
 	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
-	qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+	qp_attr->port_num =
+		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;
 
 	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
 	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
@@ -486,11 +490,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
 		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
 	qp_attr->min_rnr_timer =
 		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
-	qp_attr->port_num = qp_attr->ah_attr.port_num;
 	qp_attr->timeout = context->pri_path.ackto >> 3;
 	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
 	qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
-	qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
 	qp_attr->alt_timeout = context->alt_path.ackto >> 3;
495 qp_init_attr->cap = qp_attr->cap; 497 qp_init_attr->cap = qp_attr->cap;
496 498
@@ -527,7 +529,8 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
527 return 0; 529 return 0;
528} 530}
529 531
530int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) 532int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
533 struct ib_udata *udata)
531{ 534{
532 struct mthca_dev *dev = to_mdev(ibqp->device); 535 struct mthca_dev *dev = to_mdev(ibqp->device);
533 struct mthca_qp *qp = to_mqp(ibqp); 536 struct mthca_qp *qp = to_mqp(ibqp);
@@ -842,11 +845,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
842 * entries and reinitialize the QP. 845 * entries and reinitialize the QP.
843 */ 846 */
844 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 847 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
845 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, 848 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
846 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 849 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
847 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 850 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
848 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, 851 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
849 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
850 852
851 mthca_wq_reset(&qp->sq); 853 mthca_wq_reset(&qp->sq);
852 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 854 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index b60a9d79ae54..0f316c87bf64 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -358,7 +358,7 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
358} 358}
359 359
360int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 360int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
361 enum ib_srq_attr_mask attr_mask) 361 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
362{ 362{
363 struct mthca_dev *dev = to_mdev(ibsrq->device); 363 struct mthca_dev *dev = to_mdev(ibsrq->device);
364 struct mthca_srq *srq = to_msrq(ibsrq); 364 struct mthca_srq *srq = to_msrq(ibsrq);
diff --git a/drivers/infiniband/hw/mthca/mthca_uar.c b/drivers/infiniband/hw/mthca/mthca_uar.c
index 8e9219842be4..8b728486410d 100644
--- a/drivers/infiniband/hw/mthca/mthca_uar.c
+++ b/drivers/infiniband/hw/mthca/mthca_uar.c
@@ -60,7 +60,7 @@ int mthca_init_uar_table(struct mthca_dev *dev)
60 ret = mthca_alloc_init(&dev->uar_table.alloc, 60 ret = mthca_alloc_init(&dev->uar_table.alloc,
61 dev->limits.num_uars, 61 dev->limits.num_uars,
62 dev->limits.num_uars - 1, 62 dev->limits.num_uars - 1,
63 dev->limits.reserved_uars); 63 dev->limits.reserved_uars + 1);
64 if (ret) 64 if (ret)
65 return ret; 65 return ret;
66 66
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 474aa214ab57..0b8a79d53a00 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,8 @@ static inline void ipoib_unregister_debugfs(void) { }
336extern int ipoib_sendq_size; 336extern int ipoib_sendq_size;
337extern int ipoib_recvq_size; 337extern int ipoib_recvq_size;
338 338
339extern struct ib_sa_client ipoib_sa_client;
340
339#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 341#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
340extern int ipoib_debug_level; 342extern int ipoib_debug_level;
341 343
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5033666b1481..f426a69d9a43 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -169,117 +169,129 @@ static int ipoib_ib_post_receives(struct net_device *dev)
169 return 0; 169 return 0;
170} 170}
171 171
172static void ipoib_ib_handle_wc(struct net_device *dev, 172static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
173 struct ib_wc *wc)
174{ 173{
175 struct ipoib_dev_priv *priv = netdev_priv(dev); 174 struct ipoib_dev_priv *priv = netdev_priv(dev);
176 unsigned int wr_id = wc->wr_id; 175 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
176 struct sk_buff *skb;
177 dma_addr_t addr;
177 178
178 ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n", 179 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
179 wr_id, wc->opcode, wc->status); 180 wr_id, wc->opcode, wc->status);
180 181
181 if (wr_id & IPOIB_OP_RECV) { 182 if (unlikely(wr_id >= ipoib_recvq_size)) {
182 wr_id &= ~IPOIB_OP_RECV; 183 ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
183 184 wr_id, ipoib_recvq_size);
184 if (wr_id < ipoib_recvq_size) { 185 return;
185 struct sk_buff *skb = priv->rx_ring[wr_id].skb; 186 }
186 dma_addr_t addr = priv->rx_ring[wr_id].mapping;
187
188 if (unlikely(wc->status != IB_WC_SUCCESS)) {
189 if (wc->status != IB_WC_WR_FLUSH_ERR)
190 ipoib_warn(priv, "failed recv event "
191 "(status=%d, wrid=%d vend_err %x)\n",
192 wc->status, wr_id, wc->vendor_err);
193 dma_unmap_single(priv->ca->dma_device, addr,
194 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
195 dev_kfree_skb_any(skb);
196 priv->rx_ring[wr_id].skb = NULL;
197 return;
198 }
199 187
200 /* 188 skb = priv->rx_ring[wr_id].skb;
201 * If we can't allocate a new RX buffer, dump 189 addr = priv->rx_ring[wr_id].mapping;
202 * this packet and reuse the old buffer.
203 */
204 if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
205 ++priv->stats.rx_dropped;
206 goto repost;
207 }
208 190
209 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 191 if (unlikely(wc->status != IB_WC_SUCCESS)) {
210 wc->byte_len, wc->slid); 192 if (wc->status != IB_WC_WR_FLUSH_ERR)
193 ipoib_warn(priv, "failed recv event "
194 "(status=%d, wrid=%d vend_err %x)\n",
195 wc->status, wr_id, wc->vendor_err);
196 dma_unmap_single(priv->ca->dma_device, addr,
197 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
198 dev_kfree_skb_any(skb);
199 priv->rx_ring[wr_id].skb = NULL;
200 return;
201 }
211 202
212 dma_unmap_single(priv->ca->dma_device, addr, 203 /*
213 IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 204 * If we can't allocate a new RX buffer, dump
205 * this packet and reuse the old buffer.
206 */
207 if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
208 ++priv->stats.rx_dropped;
209 goto repost;
210 }
214 211
215 skb_put(skb, wc->byte_len); 212 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
216 skb_pull(skb, IB_GRH_BYTES); 213 wc->byte_len, wc->slid);
217 214
218 if (wc->slid != priv->local_lid || 215 dma_unmap_single(priv->ca->dma_device, addr,
219 wc->src_qp != priv->qp->qp_num) { 216 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
220 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
221 skb->mac.raw = skb->data;
222 skb_pull(skb, IPOIB_ENCAP_LEN);
223 217
224 dev->last_rx = jiffies; 218 skb_put(skb, wc->byte_len);
225 ++priv->stats.rx_packets; 219 skb_pull(skb, IB_GRH_BYTES);
226 priv->stats.rx_bytes += skb->len;
227 220
228 skb->dev = dev; 221 if (wc->slid != priv->local_lid ||
229 /* XXX get correct PACKET_ type here */ 222 wc->src_qp != priv->qp->qp_num) {
230 skb->pkt_type = PACKET_HOST; 223 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
231 netif_rx_ni(skb); 224 skb->mac.raw = skb->data;
232 } else { 225 skb_pull(skb, IPOIB_ENCAP_LEN);
233 ipoib_dbg_data(priv, "dropping loopback packet\n");
234 dev_kfree_skb_any(skb);
235 }
236 226
237 repost: 227 dev->last_rx = jiffies;
238 if (unlikely(ipoib_ib_post_receive(dev, wr_id))) 228 ++priv->stats.rx_packets;
239 ipoib_warn(priv, "ipoib_ib_post_receive failed " 229 priv->stats.rx_bytes += skb->len;
240 "for buf %d\n", wr_id);
241 } else
242 ipoib_warn(priv, "completion event with wrid %d\n",
243 wr_id);
244 230
231 skb->dev = dev;
232 /* XXX get correct PACKET_ type here */
233 skb->pkt_type = PACKET_HOST;
234 netif_rx_ni(skb);
245 } else { 235 } else {
246 struct ipoib_tx_buf *tx_req; 236 ipoib_dbg_data(priv, "dropping loopback packet\n");
247 unsigned long flags; 237 dev_kfree_skb_any(skb);
238 }
248 239
249 if (wr_id >= ipoib_sendq_size) { 240repost:
250 ipoib_warn(priv, "completion event with wrid %d (> %d)\n", 241 if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
251 wr_id, ipoib_sendq_size); 242 ipoib_warn(priv, "ipoib_ib_post_receive failed "
252 return; 243 "for buf %d\n", wr_id);
253 } 244}
254 245
255 ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id); 246static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
247{
248 struct ipoib_dev_priv *priv = netdev_priv(dev);
249 unsigned int wr_id = wc->wr_id;
250 struct ipoib_tx_buf *tx_req;
251 unsigned long flags;
256 252
257 tx_req = &priv->tx_ring[wr_id]; 253 ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
254 wr_id, wc->opcode, wc->status);
258 255
259 dma_unmap_single(priv->ca->dma_device, 256 if (unlikely(wr_id >= ipoib_sendq_size)) {
260 pci_unmap_addr(tx_req, mapping), 257 ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
261 tx_req->skb->len, 258 wr_id, ipoib_sendq_size);
262 DMA_TO_DEVICE); 259 return;
260 }
263 261
264 ++priv->stats.tx_packets; 262 tx_req = &priv->tx_ring[wr_id];
265 priv->stats.tx_bytes += tx_req->skb->len;
266 263
267 dev_kfree_skb_any(tx_req->skb); 264 dma_unmap_single(priv->ca->dma_device,
265 pci_unmap_addr(tx_req, mapping),
266 tx_req->skb->len,
267 DMA_TO_DEVICE);
268 268
269 spin_lock_irqsave(&priv->tx_lock, flags); 269 ++priv->stats.tx_packets;
270 ++priv->tx_tail; 270 priv->stats.tx_bytes += tx_req->skb->len;
271 if (netif_queue_stopped(dev) &&
272 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
273 priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
274 netif_wake_queue(dev);
275 spin_unlock_irqrestore(&priv->tx_lock, flags);
276 271
277 if (wc->status != IB_WC_SUCCESS && 272 dev_kfree_skb_any(tx_req->skb);
278 wc->status != IB_WC_WR_FLUSH_ERR) 273
279 ipoib_warn(priv, "failed send event " 274 spin_lock_irqsave(&priv->tx_lock, flags);
280 "(status=%d, wrid=%d vend_err %x)\n", 275 ++priv->tx_tail;
281 wc->status, wr_id, wc->vendor_err); 276 if (netif_queue_stopped(dev) &&
282 } 277 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
278 priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
279 netif_wake_queue(dev);
280 spin_unlock_irqrestore(&priv->tx_lock, flags);
281
282 if (wc->status != IB_WC_SUCCESS &&
283 wc->status != IB_WC_WR_FLUSH_ERR)
284 ipoib_warn(priv, "failed send event "
285 "(status=%d, wrid=%d vend_err %x)\n",
286 wc->status, wr_id, wc->vendor_err);
287}
288
289static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
290{
291 if (wc->wr_id & IPOIB_OP_RECV)
292 ipoib_ib_handle_rx_wc(dev, wc);
293 else
294 ipoib_ib_handle_tx_wc(dev, wc);
283} 295}
284 296
285void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) 297void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
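The rewrite above splits ipoib_ib_handle_wc() into dedicated RX and TX handlers and keys the dispatch off the IPOIB_OP_RECV bit carried in the work-request ID. A small self-contained sketch of the same tag-and-dispatch pattern; the flag value and helper names here are invented for illustration:

/* Illustrative model of flag-bit dispatch on a work-request ID. */
#include <stdint.h>
#include <stdio.h>

#define OP_RECV (1U << 31)   /* hypothetical flag bit, in the spirit of IPOIB_OP_RECV */

static void handle_rx(unsigned int slot) { printf("rx completion, ring slot %u\n", slot); }
static void handle_tx(unsigned int slot) { printf("tx completion, ring slot %u\n", slot); }

/* Post-time: tag receive work requests so the completion handler can
 * tell the two rings apart without any lookup. */
static uint64_t make_rx_wrid(unsigned int slot) { return slot | OP_RECV; }
static uint64_t make_tx_wrid(unsigned int slot) { return slot; }

/* Completion-time dispatch, the shape the handler takes after the split. */
static void handle_wc(uint64_t wr_id)
{
        if (wr_id & OP_RECV)
                handle_rx((unsigned int)(wr_id & ~(uint64_t)OP_RECV));
        else
                handle_tx((unsigned int)wr_id);
}

int main(void)
{
        handle_wc(make_rx_wrid(7));
        handle_wc(make_tx_wrid(3));
        return 0;
}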
@@ -320,7 +332,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
320 struct ipoib_tx_buf *tx_req; 332 struct ipoib_tx_buf *tx_req;
321 dma_addr_t addr; 333 dma_addr_t addr;
322 334
323 if (skb->len > dev->mtu + INFINIBAND_ALEN) { 335 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
324 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", 336 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
325 skb->len, dev->mtu + INFINIBAND_ALEN); 337 skb->len, dev->mtu + INFINIBAND_ALEN);
326 ++priv->stats.tx_dropped; 338 ++priv->stats.tx_dropped;
@@ -619,8 +631,10 @@ void ipoib_ib_dev_flush(void *_dev)
619 * The device could have been brought down between the start and when 631 * The device could have been brought down between the start and when
620 * we get here, don't bring it back up if it's not configured up 632 * we get here, don't bring it back up if it's not configured up
621 */ 633 */
622 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 634 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
623 ipoib_ib_dev_up(dev); 635 ipoib_ib_dev_up(dev);
636 ipoib_mcast_restart_task(dev);
637 }
624 638
625 mutex_lock(&priv->vlan_mutex); 639 mutex_lock(&priv->vlan_mutex);
626 640
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index cf71d2a5515c..1eaf00e9862c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -40,7 +40,6 @@
40 40
41#include <linux/init.h> 41#include <linux/init.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/vmalloc.h>
44#include <linux/kernel.h> 43#include <linux/kernel.h>
45 44
46#include <linux/if_arp.h> /* For ARPHRD_xxx */ 45#include <linux/if_arp.h> /* For ARPHRD_xxx */
@@ -82,6 +81,8 @@ static const u8 ipv4_bcast_addr[] = {
82 81
83struct workqueue_struct *ipoib_workqueue; 82struct workqueue_struct *ipoib_workqueue;
84 83
84struct ib_sa_client ipoib_sa_client;
85
85static void ipoib_add_one(struct ib_device *device); 86static void ipoib_add_one(struct ib_device *device);
86static void ipoib_remove_one(struct ib_device *device); 87static void ipoib_remove_one(struct ib_device *device);
87 88
@@ -336,7 +337,8 @@ void ipoib_flush_paths(struct net_device *dev)
336 struct ipoib_path *path, *tp; 337 struct ipoib_path *path, *tp;
337 LIST_HEAD(remove_list); 338 LIST_HEAD(remove_list);
338 339
339 spin_lock_irq(&priv->lock); 340 spin_lock_irq(&priv->tx_lock);
341 spin_lock(&priv->lock);
340 342
341 list_splice(&priv->path_list, &remove_list); 343 list_splice(&priv->path_list, &remove_list);
342 INIT_LIST_HEAD(&priv->path_list); 344 INIT_LIST_HEAD(&priv->path_list);
@@ -347,12 +349,15 @@ void ipoib_flush_paths(struct net_device *dev)
347 list_for_each_entry_safe(path, tp, &remove_list, list) { 349 list_for_each_entry_safe(path, tp, &remove_list, list) {
348 if (path->query) 350 if (path->query)
349 ib_sa_cancel_query(path->query_id, path->query); 351 ib_sa_cancel_query(path->query_id, path->query);
350 spin_unlock_irq(&priv->lock); 352 spin_unlock(&priv->lock);
353 spin_unlock_irq(&priv->tx_lock);
351 wait_for_completion(&path->done); 354 wait_for_completion(&path->done);
352 path_free(dev, path); 355 path_free(dev, path);
353 spin_lock_irq(&priv->lock); 356 spin_lock_irq(&priv->tx_lock);
357 spin_lock(&priv->lock);
354 } 358 }
355 spin_unlock_irq(&priv->lock); 359 spin_unlock(&priv->lock);
360 spin_unlock_irq(&priv->tx_lock);
356} 361}
357 362
358static void path_rec_completion(int status, 363static void path_rec_completion(int status,
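The ipoib_flush_paths() hunk above now nests priv->lock inside priv->tx_lock and releases both, innermost first, around the blocking wait before reacquiring them in the same outer-then-inner order. A compilable pthread model of that ordering, assuming nothing about the IPoIB data structures (lock names and the wait stub are illustrative only):

/* Illustrative lock-ordering model, not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock  = PTHREAD_MUTEX_INITIALIZER; /* outer lock */
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER; /* inner lock */

static void wait_for_completion_stub(void) { /* stands in for a blocking wait */ }

static void flush_paths_model(int npaths)
{
        pthread_mutex_lock(&tx_lock);       /* always taken first */
        pthread_mutex_lock(&obj_lock);

        for (int i = 0; i < npaths; i++) {
                /* Drop both locks, innermost first, before blocking. */
                pthread_mutex_unlock(&obj_lock);
                pthread_mutex_unlock(&tx_lock);

                wait_for_completion_stub();

                /* Reacquire in the same outer-then-inner order. */
                pthread_mutex_lock(&tx_lock);
                pthread_mutex_lock(&obj_lock);
        }

        pthread_mutex_unlock(&obj_lock);
        pthread_mutex_unlock(&tx_lock);
        printf("flushed %d paths\n", npaths);
}

int main(void) { flush_paths_model(2); return 0; }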
@@ -459,7 +464,7 @@ static int path_rec_start(struct net_device *dev,
459 init_completion(&path->done); 464 init_completion(&path->done);
460 465
461 path->query_id = 466 path->query_id =
462 ib_sa_path_rec_get(priv->ca, priv->port, 467 ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
463 &path->pathrec, 468 &path->pathrec,
464 IB_SA_PATH_REC_DGID | 469 IB_SA_PATH_REC_DGID |
465 IB_SA_PATH_REC_SGID | 470 IB_SA_PATH_REC_SGID |
@@ -615,7 +620,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
615 struct ipoib_neigh *neigh; 620 struct ipoib_neigh *neigh;
616 unsigned long flags; 621 unsigned long flags;
617 622
618 if (!spin_trylock_irqsave(&priv->tx_lock, flags)) 623 if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
619 return NETDEV_TX_LOCKED; 624 return NETDEV_TX_LOCKED;
620 625
621 /* 626 /*
@@ -628,7 +633,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
628 return NETDEV_TX_BUSY; 633 return NETDEV_TX_BUSY;
629 } 634 }
630 635
631 if (skb->dst && skb->dst->neighbour) { 636 if (likely(skb->dst && skb->dst->neighbour)) {
632 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { 637 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
633 ipoib_path_lookup(skb, dev); 638 ipoib_path_lookup(skb, dev);
634 goto out; 639 goto out;
@@ -1107,13 +1112,16 @@ static void ipoib_add_one(struct ib_device *device)
1107 struct ipoib_dev_priv *priv; 1112 struct ipoib_dev_priv *priv;
1108 int s, e, p; 1113 int s, e, p;
1109 1114
1115 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1116 return;
1117
1110 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1118 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
1111 if (!dev_list) 1119 if (!dev_list)
1112 return; 1120 return;
1113 1121
1114 INIT_LIST_HEAD(dev_list); 1122 INIT_LIST_HEAD(dev_list);
1115 1123
1116 if (device->node_type == IB_NODE_SWITCH) { 1124 if (device->node_type == RDMA_NODE_IB_SWITCH) {
1117 s = 0; 1125 s = 0;
1118 e = 0; 1126 e = 0;
1119 } else { 1127 } else {
@@ -1137,6 +1145,9 @@ static void ipoib_remove_one(struct ib_device *device)
1137 struct ipoib_dev_priv *priv, *tmp; 1145 struct ipoib_dev_priv *priv, *tmp;
1138 struct list_head *dev_list; 1146 struct list_head *dev_list;
1139 1147
1148 if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
1149 return;
1150
1140 dev_list = ib_get_client_data(device, &ipoib_client); 1151 dev_list = ib_get_client_data(device, &ipoib_client);
1141 1152
1142 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1153 list_for_each_entry_safe(priv, tmp, dev_list, list) {
@@ -1181,13 +1192,16 @@ static int __init ipoib_init_module(void)
1181 goto err_fs; 1192 goto err_fs;
1182 } 1193 }
1183 1194
1195 ib_sa_register_client(&ipoib_sa_client);
1196
1184 ret = ib_register_client(&ipoib_client); 1197 ret = ib_register_client(&ipoib_client);
1185 if (ret) 1198 if (ret)
1186 goto err_wq; 1199 goto err_sa;
1187 1200
1188 return 0; 1201 return 0;
1189 1202
1190err_wq: 1203err_sa:
1204 ib_sa_unregister_client(&ipoib_sa_client);
1191 destroy_workqueue(ipoib_workqueue); 1205 destroy_workqueue(ipoib_workqueue);
1192 1206
1193err_fs: 1207err_fs:
@@ -1199,6 +1213,7 @@ err_fs:
1199static void __exit ipoib_cleanup_module(void) 1213static void __exit ipoib_cleanup_module(void)
1200{ 1214{
1201 ib_unregister_client(&ipoib_client); 1215 ib_unregister_client(&ipoib_client);
1216 ib_sa_unregister_client(&ipoib_sa_client);
1202 ipoib_unregister_debugfs(); 1217 ipoib_unregister_debugfs();
1203 destroy_workqueue(ipoib_workqueue); 1218 destroy_workqueue(ipoib_workqueue);
1204} 1219}
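The module init/exit hunks register the new ipoib_sa_client before the IB client and unwind it on the renamed err_sa path, with cleanup running in exactly the reverse order of initialization. A small stand-alone model of that goto-based unwinding; every register/unregister stub below is a placeholder, not the real IB API:

/* Illustrative init/cleanup ordering model. */
#include <stdio.h>

static int  register_fs(void)       { return 0; }
static void unregister_fs(void)     { }
static void sa_register(void)       { }  /* cannot fail, like ib_sa_register_client() */
static void sa_unregister(void)     { }
static int  register_client(void)   { return -1; /* force the error path */ }
static void unregister_client(void) { }

static int init_model(void)
{
        int ret = register_fs();
        if (ret)
                return ret;

        sa_register();                  /* registered before the IB client ... */

        ret = register_client();
        if (ret)
                goto err_sa;            /* ... so it must be undone on failure */
        return 0;

err_sa:
        sa_unregister();
        unregister_fs();
        return ret;
}

static void cleanup_model(void)
{
        unregister_client();            /* exact reverse of the init order */
        sa_unregister();
        unregister_fs();
}

int main(void)
{
        if (init_model())
                printf("init failed, everything unwound\n");
        else
                cleanup_model();
        return 0;
}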
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ec356ce7cdcd..3faa1820f0e9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -361,7 +361,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
361 361
362 init_completion(&mcast->done); 362 init_completion(&mcast->done);
363 363
364 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, 364 ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, &rec,
365 IB_SA_MCMEMBER_REC_MGID | 365 IB_SA_MCMEMBER_REC_MGID |
366 IB_SA_MCMEMBER_REC_PORT_GID | 366 IB_SA_MCMEMBER_REC_PORT_GID |
367 IB_SA_MCMEMBER_REC_PKEY | 367 IB_SA_MCMEMBER_REC_PKEY |
@@ -472,22 +472,32 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
472 472
473 if (create) { 473 if (create) {
474 comp_mask |= 474 comp_mask |=
475 IB_SA_MCMEMBER_REC_QKEY | 475 IB_SA_MCMEMBER_REC_QKEY |
476 IB_SA_MCMEMBER_REC_SL | 476 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
477 IB_SA_MCMEMBER_REC_FLOW_LABEL | 477 IB_SA_MCMEMBER_REC_MTU |
478 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 478 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
479 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
480 IB_SA_MCMEMBER_REC_RATE |
481 IB_SA_MCMEMBER_REC_SL |
482 IB_SA_MCMEMBER_REC_FLOW_LABEL |
483 IB_SA_MCMEMBER_REC_HOP_LIMIT;
479 484
480 rec.qkey = priv->broadcast->mcmember.qkey; 485 rec.qkey = priv->broadcast->mcmember.qkey;
486 rec.mtu_selector = IB_SA_EQ;
487 rec.mtu = priv->broadcast->mcmember.mtu;
488 rec.traffic_class = priv->broadcast->mcmember.traffic_class;
489 rec.rate_selector = IB_SA_EQ;
490 rec.rate = priv->broadcast->mcmember.rate;
481 rec.sl = priv->broadcast->mcmember.sl; 491 rec.sl = priv->broadcast->mcmember.sl;
482 rec.flow_label = priv->broadcast->mcmember.flow_label; 492 rec.flow_label = priv->broadcast->mcmember.flow_label;
483 rec.traffic_class = priv->broadcast->mcmember.traffic_class; 493 rec.hop_limit = priv->broadcast->mcmember.hop_limit;
484 } 494 }
485 495
486 init_completion(&mcast->done); 496 init_completion(&mcast->done);
487 497
488 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask, 498 ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port,
489 mcast->backoff * 1000, GFP_ATOMIC, 499 &rec, comp_mask, mcast->backoff * 1000,
490 ipoib_mcast_join_complete, 500 GFP_ATOMIC, ipoib_mcast_join_complete,
491 mcast, &mcast->query); 501 mcast, &mcast->query);
492 502
493 if (ret < 0) { 503 if (ret < 0) {
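The join hunk above now echoes the broadcast group's MTU, rate, SL, flow label and hop limit with "equal" selectors and widens comp_mask accordingly, since the SA only checks fields whose bits are set in the component mask. A toy component-mask builder in the same spirit; the bit values and struct layout are invented for the example and do not match the real IB_SA_MCMEMBER_REC_* definitions:

/* Illustrative component-mask model. */
#include <stdio.h>

#define REC_MGID     (1u << 0)
#define REC_QKEY     (1u << 1)
#define REC_MTU_SEL  (1u << 2)
#define REC_MTU      (1u << 3)
#define REC_RATE_SEL (1u << 4)
#define REC_RATE     (1u << 5)

struct member_rec {
        unsigned int  qkey;
        unsigned char mtu_selector, mtu;    /* selector says how to compare (eq/gt/lt) */
        unsigned char rate_selector, rate;
};

/* When creating a group, copy the broadcast group's parameters and mark
 * every copied field in the mask so the SA actually validates them. */
static unsigned int build_join(struct member_rec *rec,
                               const struct member_rec *bcast, int create)
{
        unsigned int mask = REC_MGID;

        if (create) {
                rec->qkey          = bcast->qkey;
                rec->mtu_selector  = 0 /* "equal" */;  rec->mtu  = bcast->mtu;
                rec->rate_selector = 0 /* "equal" */;  rec->rate = bcast->rate;
                mask |= REC_QKEY | REC_MTU_SEL | REC_MTU | REC_RATE_SEL | REC_RATE;
        }
        return mask;
}

int main(void)
{
        struct member_rec bcast = { 0x1234, 0, 4, 0, 3 }, rec = { 0 };

        printf("comp_mask = 0x%x\n", build_join(&rec, &bcast, 1));
        return 0;
}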
@@ -528,7 +538,7 @@ void ipoib_mcast_join_task(void *dev_ptr)
528 priv->local_rate = attr.active_speed * 538 priv->local_rate = attr.active_speed *
529 ib_width_enum_to_int(attr.active_width); 539 ib_width_enum_to_int(attr.active_width);
530 } else 540 } else
531 ipoib_warn(priv, "ib_query_port failed\n"); 541 ipoib_warn(priv, "ib_query_port failed\n");
532 } 542 }
533 543
534 if (!priv->broadcast) { 544 if (!priv->broadcast) {
@@ -681,7 +691,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
681 * Just make one shot at leaving and don't wait for a reply; 691 * Just make one shot at leaving and don't wait for a reply;
682 * if we fail, too bad. 692 * if we fail, too bad.
683 */ 693 */
684 ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec, 694 ret = ib_sa_mcmember_rec_delete(&ipoib_sa_client, priv->ca, priv->port, &rec,
685 IB_SA_MCMEMBER_REC_MGID | 695 IB_SA_MCMEMBER_REC_MGID |
686 IB_SA_MCMEMBER_REC_PORT_GID | 696 IB_SA_MCMEMBER_REC_PORT_GID |
687 IB_SA_MCMEMBER_REC_PKEY | 697 IB_SA_MCMEMBER_REC_PKEY |
@@ -795,7 +805,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
795 } 805 }
796 806
797 if (priv->broadcast) { 807 if (priv->broadcast) {
798 rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); 808 rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
799 list_add_tail(&priv->broadcast->list, &remove_list); 809 list_add_tail(&priv->broadcast->list, &remove_list);
800 priv->broadcast = NULL; 810 priv->broadcast = NULL;
801 } 811 }
diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig
index fead87d1eff9..365a1b5f19e0 100644
--- a/drivers/infiniband/ulp/iser/Kconfig
+++ b/drivers/infiniband/ulp/iser/Kconfig
@@ -1,6 +1,6 @@
1config INFINIBAND_ISER 1config INFINIBAND_ISER
2 tristate "ISCSI RDMA Protocol" 2 tristate "ISCSI RDMA Protocol"
3 depends on INFINIBAND && SCSI 3 depends on INFINIBAND && SCSI && INET
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 ---help--- 5 ---help---
6 Support for the ISCSI RDMA Protocol over InfiniBand. This 6 Support for the ISCSI RDMA Protocol over InfiniBand. This
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1437d7ee3b19..2a14fe2e3226 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -141,18 +141,11 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
141 141
142 if (sc->sc_data_direction == DMA_TO_DEVICE) { 142 if (sc->sc_data_direction == DMA_TO_DEVICE) {
143 BUG_ON(ctask->total_length == 0); 143 BUG_ON(ctask->total_length == 0);
144 /* bytes to be sent via RDMA operations */
145 iser_ctask->rdma_data_count = ctask->total_length -
146 ctask->imm_count -
147 ctask->unsol_count;
148 144
149 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d " 145 debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
150 "rdma_data %d]\n",
151 ctask->itt, ctask->total_length, ctask->imm_count, 146 ctask->itt, ctask->total_length, ctask->imm_count,
152 ctask->unsol_count, iser_ctask->rdma_data_count); 147 ctask->unsol_count);
153 } else 148 }
154 /* bytes to be sent via RDMA operations */
155 iser_ctask->rdma_data_count = ctask->total_length;
156 149
157 iser_ctask_rdma_init(iser_ctask); 150 iser_ctask_rdma_init(iser_ctask);
158} 151}
@@ -196,13 +189,10 @@ iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
196{ 189{
197 struct iscsi_data hdr; 190 struct iscsi_data hdr;
198 int error = 0; 191 int error = 0;
199 struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
200 192
201 /* Send data-out PDUs while there's still unsolicited data to send */ 193 /* Send data-out PDUs while there's still unsolicited data to send */
202 while (ctask->unsol_count > 0) { 194 while (ctask->unsol_count > 0) {
203 iscsi_prep_unsolicit_data_pdu(ctask, &hdr, 195 iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
204 iser_ctask->rdma_data_count);
205
206 debug_scsi("Sending data-out: itt 0x%x, data count %d\n", 196 debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
207 hdr.itt, ctask->data_count); 197 hdr.itt, ctask->data_count);
208 198
@@ -555,6 +545,7 @@ static struct scsi_host_template iscsi_iser_sht = {
555 .queuecommand = iscsi_queuecommand, 545 .queuecommand = iscsi_queuecommand,
556 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 546 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
557 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, 547 .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
548 .max_sectors = 1024,
558 .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN, 549 .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
559 .eh_abort_handler = iscsi_eh_abort, 550 .eh_abort_handler = iscsi_eh_abort,
560 .eh_host_reset_handler = iscsi_eh_host_reset, 551 .eh_host_reset_handler = iscsi_eh_host_reset,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 3350ba690cfe..2cf9ae0def1c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -82,8 +82,12 @@
82 __func__ , ## arg); \ 82 __func__ , ## arg); \
83 } while (0) 83 } while (0)
84 84
85#define SHIFT_4K 12
86#define SIZE_4K (1UL << SHIFT_4K)
87#define MASK_4K (~(SIZE_4K-1))
88
85 /* support upto 512KB in one RDMA */ 89 /* support upto 512KB in one RDMA */
86#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> PAGE_SHIFT) 90#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
87#define ISCSI_ISER_MAX_LUN 256 91#define ISCSI_ISER_MAX_LUN 256
88#define ISCSI_ISER_MAX_CMD_LEN 16 92#define ISCSI_ISER_MAX_CMD_LEN 16
89 93
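The new SHIFT_4K/SIZE_4K/MASK_4K constants pin the RDMA page size at 4 KB regardless of the kernel's PAGE_SIZE. Note that MASK_4K keeps the high bits, so "addr & MASK_4K" yields the page base and "addr & ~MASK_4K" the offset within the page, which is why the IS_4K_ALIGNED macro later in this series tests against ~MASK_4K. A tiny self-contained check of those identities:

/* Stand-alone check of the 4K mask arithmetic. */
#include <stdio.h>

#define SHIFT_4K 12
#define SIZE_4K  (1UL << SHIFT_4K)       /* 0x1000 */
#define MASK_4K  (~(SIZE_4K - 1))        /* keeps the high bits: ...fffff000 */

/* Aligned means nothing survives the "& ~MASK_4K" (low-bits) test. */
#define IS_4K_ALIGNED(addr) ((((unsigned long)(addr)) & ~MASK_4K) == 0)

int main(void)
{
        unsigned long a = 0x12345a80UL;

        printf("page base:   0x%lx\n", a & MASK_4K);    /* 0x12345000 */
        printf("page offset: 0x%lx\n", a & ~MASK_4K);   /* 0xa80 */
        printf("aligned? %d / %d\n",
               IS_4K_ALIGNED(a), IS_4K_ALIGNED(a & MASK_4K));
        return 0;
}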
@@ -171,6 +175,7 @@ struct iser_mem_reg {
171 u64 va; 175 u64 va;
172 u64 len; 176 u64 len;
173 void *mem_h; 177 void *mem_h;
178 int is_fmr;
174}; 179};
175 180
176struct iser_regd_buf { 181struct iser_regd_buf {
@@ -257,7 +262,6 @@ struct iscsi_iser_conn {
257struct iscsi_iser_cmd_task { 262struct iscsi_iser_cmd_task {
258 struct iser_desc desc; 263 struct iser_desc desc;
259 struct iscsi_iser_conn *iser_conn; 264 struct iscsi_iser_conn *iser_conn;
260 int rdma_data_count;/* RDMA bytes */
261 enum iser_task_status status; 265 enum iser_task_status status;
262 int command_sent; /* set if command sent */ 266 int command_sent; /* set if command sent */
263 int dir[ISER_DIRS_NUM]; /* set if dir use*/ 267 int dir[ISER_DIRS_NUM]; /* set if dir use*/
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 31950a522a1c..d0b03f426581 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -42,6 +42,7 @@
42#include "iscsi_iser.h" 42#include "iscsi_iser.h"
43 43
44#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ 44#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
45
45/** 46/**
46 * Decrements the reference count for the 47 * Decrements the reference count for the
47 * registered buffer & releases it 48 * registered buffer & releases it
@@ -55,7 +56,7 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
55 if ((atomic_read(&regd_buf->ref_count) == 0) || 56 if ((atomic_read(&regd_buf->ref_count) == 0) ||
56 atomic_dec_and_test(&regd_buf->ref_count)) { 57 atomic_dec_and_test(&regd_buf->ref_count)) {
57 /* if we used the dma mr, unreg is just NOP */ 58 /* if we used the dma mr, unreg is just NOP */
58 if (regd_buf->reg.rkey != 0) 59 if (regd_buf->reg.is_fmr)
59 iser_unreg_mem(&regd_buf->reg); 60 iser_unreg_mem(&regd_buf->reg);
60 61
61 if (regd_buf->dma_addr) { 62 if (regd_buf->dma_addr) {
@@ -90,9 +91,9 @@ void iser_reg_single(struct iser_device *device,
90 BUG_ON(dma_mapping_error(dma_addr)); 91 BUG_ON(dma_mapping_error(dma_addr));
91 92
92 regd_buf->reg.lkey = device->mr->lkey; 93 regd_buf->reg.lkey = device->mr->lkey;
93 regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */
94 regd_buf->reg.len = regd_buf->data_size; 94 regd_buf->reg.len = regd_buf->data_size;
95 regd_buf->reg.va = dma_addr; 95 regd_buf->reg.va = dma_addr;
96 regd_buf->reg.is_fmr = 0;
96 97
97 regd_buf->dma_addr = dma_addr; 98 regd_buf->dma_addr = dma_addr;
98 regd_buf->direction = direction; 99 regd_buf->direction = direction;
@@ -239,7 +240,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
239 int i; 240 int i;
240 241
241 /* compute the offset of first element */ 242 /* compute the offset of first element */
242 page_vec->offset = (u64) sg[0].offset; 243 page_vec->offset = (u64) sg[0].offset & ~MASK_4K;
243 244
244 for (i = 0; i < data->dma_nents; i++) { 245 for (i = 0; i < data->dma_nents; i++) {
245 total_sz += sg_dma_len(&sg[i]); 246 total_sz += sg_dma_len(&sg[i]);
@@ -247,21 +248,30 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
247 first_addr = sg_dma_address(&sg[i]); 248 first_addr = sg_dma_address(&sg[i]);
248 last_addr = first_addr + sg_dma_len(&sg[i]); 249 last_addr = first_addr + sg_dma_len(&sg[i]);
249 250
250 start_aligned = !(first_addr & ~PAGE_MASK); 251 start_aligned = !(first_addr & ~MASK_4K);
251 end_aligned = !(last_addr & ~PAGE_MASK); 252 end_aligned = !(last_addr & ~MASK_4K);
252 253
253 /* continue to collect page fragments till aligned or SG ends */ 254 /* continue to collect page fragments till aligned or SG ends */
254 while (!end_aligned && (i + 1 < data->dma_nents)) { 255 while (!end_aligned && (i + 1 < data->dma_nents)) {
255 i++; 256 i++;
256 total_sz += sg_dma_len(&sg[i]); 257 total_sz += sg_dma_len(&sg[i]);
257 last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); 258 last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]);
258 end_aligned = !(last_addr & ~PAGE_MASK); 259 end_aligned = !(last_addr & ~MASK_4K);
259 } 260 }
260 261
261 first_addr = first_addr & PAGE_MASK; 262 /* handle the 1st page in the 1st DMA element */
262 263 if (cur_page == 0) {
263 for (page = first_addr; page < last_addr; page += PAGE_SIZE) 264 page = first_addr & MASK_4K;
264 page_vec->pages[cur_page++] = page; 265 page_vec->pages[cur_page] = page;
266 cur_page++;
267 page += SIZE_4K;
268 } else
269 page = first_addr;
270
271 for (; page < last_addr; page += SIZE_4K) {
272 page_vec->pages[cur_page] = page;
273 cur_page++;
274 }
265 275
266 } 276 }
267 page_vec->data_size = total_sz; 277 page_vec->data_size = total_sz;
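iser_sg_to_page_vec() now walks the scatterlist in 4 KB steps and rounds only the very first page down, reporting the in-page offset separately. A compilable userspace model of that page-vector construction, with simplified types and a fabricated example segment:

/* Illustrative page-vector model, not the iSER code. */
#include <stdint.h>
#include <stdio.h>

#define SHIFT_4K 12
#define SIZE_4K  (1UL << SHIFT_4K)
#define MASK_4K  (~(SIZE_4K - 1))

struct seg { uint64_t addr; uint64_t len; };

/* Collect the 4K page addresses spanned by a run of segments. Only the
 * very first page may start below its segment address; the in-page offset
 * is reported separately, mirroring the "1st page in the 1st DMA element"
 * case in the hunk above. Later segments are assumed 4K aligned. */
static int build_page_vec(const struct seg *sg, int nents,
                          uint64_t *pages, int max_pages, uint64_t *offset)
{
        int cur_page = 0;

        *offset = sg[0].addr & ~MASK_4K;

        for (int i = 0; i < nents; i++) {
                uint64_t last = sg[i].addr + sg[i].len;
                uint64_t page = (cur_page == 0) ? (sg[i].addr & MASK_4K)
                                                : sg[i].addr;

                for (; page < last && cur_page < max_pages; page += SIZE_4K)
                        pages[cur_page++] = page;
        }
        return cur_page;
}

int main(void)
{
        struct seg sg[] = { { 0x1000a80, 0x2580 } };  /* starts mid-page */
        uint64_t pages[8], off;
        int n = build_page_vec(sg, 1, pages, 8, &off);

        printf("offset in first page: 0x%llx\n", (unsigned long long)off);
        for (int i = 0; i < n; i++)
                printf("page[%d] = 0x%llx\n", i, (unsigned long long)pages[i]);
        return 0;
}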
@@ -269,8 +279,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
269 return cur_page; 279 return cur_page;
270} 280}
271 281
272#define MASK_4K ((1UL << 12) - 1) /* 0xFFF */ 282#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
273#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & MASK_4K) == 0)
274 283
275/** 284/**
276 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned 285 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
@@ -320,9 +329,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data)
320 struct scatterlist *sg = (struct scatterlist *)data->buf; 329 struct scatterlist *sg = (struct scatterlist *)data->buf;
321 int i; 330 int i;
322 331
323 for (i = 0; i < data->size; i++) 332 for (i = 0; i < data->dma_nents; i++)
324 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 333 iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
325 "off:%d sz:%d dma_len:%d\n", 334 "off:0x%x sz:0x%x dma_len:0x%x\n",
326 i, (unsigned long)sg_dma_address(&sg[i]), 335 i, (unsigned long)sg_dma_address(&sg[i]),
327 sg[i].page, sg[i].offset, 336 sg[i].page, sg[i].offset,
328 sg[i].length,sg_dma_len(&sg[i])); 337 sg[i].length,sg_dma_len(&sg[i]));
@@ -352,7 +361,7 @@ static void iser_page_vec_build(struct iser_data_buf *data,
352 361
353 page_vec->length = page_vec_len; 362 page_vec->length = page_vec_len;
354 363
355 if (page_vec_len * PAGE_SIZE < page_vec->data_size) { 364 if (page_vec_len * SIZE_4K < page_vec->data_size) {
356 iser_err("page_vec too short to hold this SG\n"); 365 iser_err("page_vec too short to hold this SG\n");
357 iser_data_buf_dump(data); 366 iser_data_buf_dump(data);
358 iser_dump_page_vec(page_vec); 367 iser_dump_page_vec(page_vec);
@@ -370,15 +379,18 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
370 enum iser_data_dir cmd_dir) 379 enum iser_data_dir cmd_dir)
371{ 380{
372 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 381 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
382 struct iser_device *device = ib_conn->device;
373 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; 383 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
374 struct iser_regd_buf *regd_buf; 384 struct iser_regd_buf *regd_buf;
375 int aligned_len; 385 int aligned_len;
376 int err; 386 int err;
387 int i;
388 struct scatterlist *sg;
377 389
378 regd_buf = &iser_ctask->rdma_regd[cmd_dir]; 390 regd_buf = &iser_ctask->rdma_regd[cmd_dir];
379 391
380 aligned_len = iser_data_buf_aligned_len(mem); 392 aligned_len = iser_data_buf_aligned_len(mem);
381 if (aligned_len != mem->size) { 393 if (aligned_len != mem->dma_nents) {
382 iser_err("rdma alignment violation %d/%d aligned\n", 394 iser_err("rdma alignment violation %d/%d aligned\n",
383 aligned_len, mem->size); 395 aligned_len, mem->size);
384 iser_data_buf_dump(mem); 396 iser_data_buf_dump(mem);
@@ -389,10 +401,38 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
389 mem = &iser_ctask->data_copy[cmd_dir]; 401 mem = &iser_ctask->data_copy[cmd_dir];
390 } 402 }
391 403
392 iser_page_vec_build(mem, ib_conn->page_vec); 404 /* if there a single dma entry, FMR is not needed */
393 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); 405 if (mem->dma_nents == 1) {
394 if (err) 406 sg = (struct scatterlist *)mem->buf;
395 return err; 407
408 regd_buf->reg.lkey = device->mr->lkey;
409 regd_buf->reg.rkey = device->mr->rkey;
410 regd_buf->reg.len = sg_dma_len(&sg[0]);
411 regd_buf->reg.va = sg_dma_address(&sg[0]);
412 regd_buf->reg.is_fmr = 0;
413
414 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
415 "va: 0x%08lX sz: %ld]\n",
416 (unsigned int)regd_buf->reg.lkey,
417 (unsigned int)regd_buf->reg.rkey,
418 (unsigned long)regd_buf->reg.va,
419 (unsigned long)regd_buf->reg.len);
420 } else { /* use FMR for multiple dma entries */
421 iser_page_vec_build(mem, ib_conn->page_vec);
422 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
423 if (err) {
424 iser_data_buf_dump(mem);
425 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
426 ntoh24(iser_ctask->desc.iscsi_header.dlength));
427 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
428 ib_conn->page_vec->data_size, ib_conn->page_vec->length,
429 ib_conn->page_vec->offset);
430 for (i=0 ; i<ib_conn->page_vec->length ; i++)
431 iser_err("page_vec[%d] = 0x%llx\n", i,
432 (unsigned long long) ib_conn->page_vec->pages[i]);
433 return err;
434 }
435 }
396 436
397 /* take a reference on this regd buf such that it will not be released * 437 /* take a reference on this regd buf such that it will not be released *
398 * (eg in send dto completion) before we get the scsi response */ 438 * (eg in send dto completion) before we get the scsi response */
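The new branch in iser_reg_rdma_mem() skips the FMR machinery entirely when the mapped buffer collapses to a single DMA segment and instead describes it with the device-wide DMA MR, recording is_fmr so the release path knows nothing has to be unmapped. A plain-C sketch of that decision; the keys and types below are stand-ins, not the driver's:

/* Illustrative registration-path model. */
#include <stdint.h>
#include <stdio.h>

struct mem_reg {
        uint32_t lkey, rkey;
        uint64_t va, len;
        int      is_fmr;   /* tells the release path whether an FMR was used */
};

struct seg { uint64_t addr, len; };

enum { DMA_MR_LKEY = 0x100, DMA_MR_RKEY = 0x101 };  /* stand-in key values */

static int reg_with_fmr(const struct seg *sg, int nents, struct mem_reg *reg)
{
        (void)sg; (void)nents;   /* placeholder for the page-vector + FMR path */
        reg->is_fmr = 1;
        return 0;
}

static int reg_rdma_mem_model(const struct seg *sg, int nents, struct mem_reg *reg)
{
        if (nents == 1) {
                /* One contiguous DMA segment: describe it directly with the
                 * pre-registered DMA MR, no FMR mapping or unmapping needed. */
                reg->lkey   = DMA_MR_LKEY;
                reg->rkey   = DMA_MR_RKEY;
                reg->va     = sg[0].addr;
                reg->len    = sg[0].len;
                reg->is_fmr = 0;
                return 0;
        }
        return reg_with_fmr(sg, nents, reg);
}

int main(void)
{
        struct seg one = { 0x7f0000001000ULL, 0x2000 };
        struct mem_reg reg;

        reg_rdma_mem_model(&one, 1, &reg);
        printf("va=0x%llx len=%llu is_fmr=%d\n",
               (unsigned long long)reg.va, (unsigned long long)reg.len, reg.is_fmr);
        return 0;
}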
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 72febf1f8ff8..ecdca7fc1e4c 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -88,8 +88,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
88 iser_cq_tasklet_fn, 88 iser_cq_tasklet_fn,
89 (unsigned long)device); 89 (unsigned long)device);
90 90
91 device->mr = ib_get_dma_mr(device->pd, 91 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
92 IB_ACCESS_LOCAL_WRITE); 92 IB_ACCESS_REMOTE_WRITE |
93 IB_ACCESS_REMOTE_READ);
93 if (IS_ERR(device->mr)) 94 if (IS_ERR(device->mr))
94 goto dma_mr_err; 95 goto dma_mr_err;
95 96
@@ -150,7 +151,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
150 } 151 }
151 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); 152 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
152 153
153 params.page_shift = PAGE_SHIFT; 154 params.page_shift = SHIFT_4K;
154 /* when the first/last SG element are not start/end * 155 /* when the first/last SG element are not start/end *
155 * page aligned, the map whould be of N+1 pages */ 156 * page aligned, the map whould be of N+1 pages */
156 params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; 157 params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
@@ -604,8 +605,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
604 605
605 mem_reg->lkey = mem->fmr->lkey; 606 mem_reg->lkey = mem->fmr->lkey;
606 mem_reg->rkey = mem->fmr->rkey; 607 mem_reg->rkey = mem->fmr->rkey;
607 mem_reg->len = page_vec->length * PAGE_SIZE; 608 mem_reg->len = page_vec->length * SIZE_4K;
608 mem_reg->va = io_addr; 609 mem_reg->va = io_addr;
610 mem_reg->is_fmr = 1;
609 mem_reg->mem_h = (void *)mem; 611 mem_reg->mem_h = (void *)mem;
610 612
611 mem_reg->va += page_vec->offset; 613 mem_reg->va += page_vec->offset;
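The DMA MR now asks for remote read and write permission because, with the single-segment path added in iser_memory.c, its rkey can be handed straight to the target for RDMA. A toy illustration of composing access rights into one flag mask; the flag values are invented and the real IB_ACCESS_* constants live in the verbs headers:

/* Illustrative access-flag model. */
#include <stdio.h>

enum {
        ACCESS_LOCAL_WRITE  = 1 << 0,
        ACCESS_REMOTE_WRITE = 1 << 1,
        ACCESS_REMOTE_READ  = 1 << 2,
};

static int mr_allows(int granted, int wanted)
{
        return (granted & wanted) == wanted;  /* every requested right is present */
}

int main(void)
{
        int old_mr = ACCESS_LOCAL_WRITE;
        int new_mr = ACCESS_LOCAL_WRITE | ACCESS_REMOTE_WRITE | ACCESS_REMOTE_READ;

        printf("old MR usable for remote RDMA read? %d\n",
               mr_allows(old_mr, ACCESS_REMOTE_READ));
        printf("new MR usable for remote RDMA read? %d\n",
               mr_allows(new_mr, ACCESS_REMOTE_READ));
        return 0;
}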
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fd8344cdc0db..44b9e5be6687 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -96,6 +96,8 @@ static struct ib_client srp_client = {
96 .remove = srp_remove_one 96 .remove = srp_remove_one
97}; 97};
98 98
99static struct ib_sa_client srp_sa_client;
100
99static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) 101static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
100{ 102{
101 return (struct srp_target_port *) host->hostdata; 103 return (struct srp_target_port *) host->hostdata;
@@ -267,7 +269,8 @@ static int srp_lookup_path(struct srp_target_port *target)
267 269
268 init_completion(&target->done); 270 init_completion(&target->done);
269 271
270 target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev, 272 target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
273 target->srp_host->dev->dev,
271 target->srp_host->port, 274 target->srp_host->port,
272 &target->path, 275 &target->path,
273 IB_SA_PATH_REC_DGID | 276 IB_SA_PATH_REC_DGID |
@@ -330,7 +333,7 @@ static int srp_send_req(struct srp_target_port *target)
330 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 333 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
331 SRP_BUF_FORMAT_INDIRECT); 334 SRP_BUF_FORMAT_INDIRECT);
332 /* 335 /*
333 * In the published SRP specification (draft rev. 16a), the 336 * In the published SRP specification (draft rev. 16a), the
334 * port identifier format is 8 bytes of ID extension followed 337 * port identifier format is 8 bytes of ID extension followed
335 * by 8 bytes of GUID. Older drafts put the two halves in the 338 * by 8 bytes of GUID. Older drafts put the two halves in the
336 * opposite order, so that the GUID comes first. 339 * opposite order, so that the GUID comes first.
@@ -1449,12 +1452,28 @@ static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
1449 return sprintf(buf, "%d\n", target->zero_req_lim); 1452 return sprintf(buf, "%d\n", target->zero_req_lim);
1450} 1453}
1451 1454
1452static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); 1455static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
1453static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); 1456{
1454static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); 1457 struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1455static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 1458
1456static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); 1459 return sprintf(buf, "%d\n", target->srp_host->port);
1457static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); 1460}
1461
1462static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
1463{
1464 struct srp_target_port *target = host_to_target(class_to_shost(cdev));
1465
1466 return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
1467}
1468
1469static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
1470static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
1471static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
1472static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
1473static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
1474static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
1475static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
1476static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
1458 1477
1459static struct class_device_attribute *srp_host_attrs[] = { 1478static struct class_device_attribute *srp_host_attrs[] = {
1460 &class_device_attr_id_ext, 1479 &class_device_attr_id_ext,
@@ -1463,6 +1482,8 @@ static struct class_device_attribute *srp_host_attrs[] = {
1463 &class_device_attr_pkey, 1482 &class_device_attr_pkey,
1464 &class_device_attr_dgid, 1483 &class_device_attr_dgid,
1465 &class_device_attr_zero_req_lim, 1484 &class_device_attr_zero_req_lim,
1485 &class_device_attr_local_ib_port,
1486 &class_device_attr_local_ib_device,
1466 NULL 1487 NULL
1467}; 1488};
1468 1489
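The two new read-only attributes expose which HCA and port an SRP target is bound to. Since SRP targets register as SCSI hosts, these presumably surface under the SCSI host's sysfs directory; a small userspace reader written under that assumption (path and host number are examples only):

/* Userspace sketch; the sysfs path is an assumption, not taken from the patch. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/scsi_host/host0/local_ib_device";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("SRP host0 sits on HCA: %s", buf);
        fclose(f);
        return 0;
}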
@@ -1881,7 +1902,7 @@ static void srp_add_one(struct ib_device *device)
1881 if (IS_ERR(srp_dev->fmr_pool)) 1902 if (IS_ERR(srp_dev->fmr_pool))
1882 srp_dev->fmr_pool = NULL; 1903 srp_dev->fmr_pool = NULL;
1883 1904
1884 if (device->node_type == IB_NODE_SWITCH) { 1905 if (device->node_type == RDMA_NODE_IB_SWITCH) {
1885 s = 0; 1906 s = 0;
1886 e = 0; 1907 e = 0;
1887 } else { 1908 } else {
@@ -1980,9 +2001,12 @@ static int __init srp_init_module(void)
1980 return ret; 2001 return ret;
1981 } 2002 }
1982 2003
2004 ib_sa_register_client(&srp_sa_client);
2005
1983 ret = ib_register_client(&srp_client); 2006 ret = ib_register_client(&srp_client);
1984 if (ret) { 2007 if (ret) {
1985 printk(KERN_ERR PFX "couldn't register IB client\n"); 2008 printk(KERN_ERR PFX "couldn't register IB client\n");
2009 ib_sa_unregister_client(&srp_sa_client);
1986 class_unregister(&srp_class); 2010 class_unregister(&srp_class);
1987 return ret; 2011 return ret;
1988 } 2012 }
@@ -1993,6 +2017,7 @@ static int __init srp_init_module(void)
1993static void __exit srp_cleanup_module(void) 2017static void __exit srp_cleanup_module(void)
1994{ 2018{
1995 ib_unregister_client(&srp_client); 2019 ib_unregister_client(&srp_client);
2020 ib_sa_unregister_client(&srp_sa_client);
1996 class_unregister(&srp_class); 2021 class_unregister(&srp_class);
1997} 2022}
1998 2023
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index c69d23bb255e..efd51e01c06e 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -45,8 +45,8 @@
45#include <linux/pmu.h> 45#include <linux/pmu.h>
46 46
47#include <asm/machdep.h> 47#include <asm/machdep.h>
48#include <asm/backlight.h>
49#ifdef CONFIG_PPC_PMAC 48#ifdef CONFIG_PPC_PMAC
49#include <asm/backlight.h>
50#include <asm/pmac_feature.h> 50#include <asm/pmac_feature.h>
51#endif 51#endif
52 52
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 82657bc86d19..d56216067549 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -139,7 +139,9 @@ static int macio_uevent(struct device *dev, char **envp, int num_envp,
139{ 139{
140 struct macio_dev * macio_dev; 140 struct macio_dev * macio_dev;
141 struct of_device * of; 141 struct of_device * of;
142 char *scratch, *compat, *compat2; 142 char *scratch;
143 const char *compat, *compat2;
144
143 int i = 0; 145 int i = 0;
144 int length, cplen, cplen2, seen = 0; 146 int length, cplen, cplen2, seen = 0;
145 147
@@ -173,7 +175,7 @@ static int macio_uevent(struct device *dev, char **envp, int num_envp,
173 * it's not really legal to split it out with commas. We split it 175 * it's not really legal to split it out with commas. We split it
174 * up using a number of environment variables instead. */ 176 * up using a number of environment variables instead. */
175 177
176 compat = (char *) get_property(of->node, "compatible", &cplen); 178 compat = get_property(of->node, "compatible", &cplen);
177 compat2 = compat; 179 compat2 = compat;
178 cplen2= cplen; 180 cplen2= cplen;
179 while (compat && cplen > 0) { 181 while (compat && cplen > 0) {
@@ -454,7 +456,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
454 struct resource *parent_res) 456 struct resource *parent_res)
455{ 457{
456 struct macio_dev *dev; 458 struct macio_dev *dev;
457 u32 *reg; 459 const u32 *reg;
458 460
459 if (np == NULL) 461 if (np == NULL)
460 return NULL; 462 return NULL;
@@ -489,7 +491,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
489#endif 491#endif
490 MAX_NODE_NAME_SIZE, np->name); 492 MAX_NODE_NAME_SIZE, np->name);
491 } else { 493 } else {
492 reg = (u32 *)get_property(np, "reg", NULL); 494 reg = get_property(np, "reg", NULL);
493 sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s", 495 sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s",
494 chip->lbus.index, 496 chip->lbus.index,
495 reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name); 497 reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
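Most of the remaining Macintosh hunks are const-correctness fall-out: get_property() now returns a const pointer into the device tree, so callers keep const-qualified locals instead of casting the qualifier away. A tiny stand-alone model of the same pattern; the property table and values are invented:

/* Illustrative const-correct lookup model. */
#include <stdio.h>
#include <string.h>

struct prop { const char *name; const void *value; int len; };

static const struct prop props[] = {
        { "model", "PowerMac7,2", 12 },
        { "reg",   "\x00\x00\x80\x00", 4 },
};

/* The store owns the data, so the lookup hands back a const pointer. */
static const void *get_prop(const char *name, int *lenp)
{
        for (unsigned int i = 0; i < sizeof(props) / sizeof(props[0]); i++)
                if (strcmp(props[i].name, name) == 0) {
                        if (lenp)
                                *lenp = props[i].len;
                        return props[i].value;
                }
        return NULL;
}

int main(void)
{
        int len;
        /* Caller keeps a const-qualified pointer rather than casting it away. */
        const char *model = get_prop("model", &len);

        if (model)
                printf("model (%d bytes): %s\n", len, model);
        return 0;
}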
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index cae24a13526a..8566bdfdd4b8 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -16,12 +16,12 @@ static ssize_t
16compatible_show (struct device *dev, struct device_attribute *attr, char *buf) 16compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
17{ 17{
18 struct of_device *of; 18 struct of_device *of;
19 char *compat; 19 const char *compat;
20 int cplen; 20 int cplen;
21 int length = 0; 21 int length = 0;
22 22
23 of = &to_macio_device (dev)->ofdev; 23 of = &to_macio_device (dev)->ofdev;
24 compat = (char *) get_property(of->node, "compatible", &cplen); 24 compat = get_property(of->node, "compatible", &cplen);
25 if (!compat) { 25 if (!compat) {
26 *buf = '\0'; 26 *buf = '\0';
27 return 0; 27 return 0;
@@ -42,12 +42,12 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr,
42 char *buf) 42 char *buf)
43{ 43{
44 struct of_device *of; 44 struct of_device *of;
45 char *compat; 45 const char *compat;
46 int cplen; 46 int cplen;
47 int length; 47 int length;
48 48
49 of = &to_macio_device (dev)->ofdev; 49 of = &to_macio_device (dev)->ofdev;
50 compat = (char *) get_property (of->node, "compatible", &cplen); 50 compat = get_property(of->node, "compatible", &cplen);
51 if (!compat) compat = "", cplen = 1; 51 if (!compat) compat = "", cplen = 1;
52 length = sprintf (buf, "of:N%sT%s", of->node->name, of->node->type); 52 length = sprintf (buf, "of:N%sT%s", of->node->name, of->node->type);
53 buf += length; 53 buf += length;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 00ef46898147..090e40fc5013 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -454,7 +454,7 @@ EXPORT_SYMBOL(smu_present);
454int __init smu_init (void) 454int __init smu_init (void)
455{ 455{
456 struct device_node *np; 456 struct device_node *np;
457 u32 *data; 457 const u32 *data;
458 458
459 np = of_find_node_by_type(NULL, "smu"); 459 np = of_find_node_by_type(NULL, "smu");
460 if (np == NULL) 460 if (np == NULL)
@@ -490,7 +490,7 @@ int __init smu_init (void)
490 printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n"); 490 printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n");
491 goto fail; 491 goto fail;
492 } 492 }
493 data = (u32 *)get_property(smu->db_node, "reg", NULL); 493 data = get_property(smu->db_node, "reg", NULL);
494 if (data == NULL) { 494 if (data == NULL) {
495 of_node_put(smu->db_node); 495 of_node_put(smu->db_node);
496 smu->db_node = NULL; 496 smu->db_node = NULL;
@@ -511,7 +511,7 @@ int __init smu_init (void)
511 smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt"); 511 smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt");
512 if (smu->msg_node == NULL) 512 if (smu->msg_node == NULL)
513 break; 513 break;
514 data = (u32 *)get_property(smu->msg_node, "reg", NULL); 514 data = get_property(smu->msg_node, "reg", NULL);
515 if (data == NULL) { 515 if (data == NULL) {
516 of_node_put(smu->msg_node); 516 of_node_put(smu->msg_node);
517 smu->msg_node = NULL; 517 smu->msg_node = NULL;
@@ -982,11 +982,11 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
982/* Note: Only allowed to return error code in pointers (using ERR_PTR) 982/* Note: Only allowed to return error code in pointers (using ERR_PTR)
983 * when interruptible is 1 983 * when interruptible is 1
984 */ 984 */
985struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, 985const struct smu_sdbp_header *__smu_get_sdb_partition(int id,
986 int interruptible) 986 unsigned int *size, int interruptible)
987{ 987{
988 char pname[32]; 988 char pname[32];
989 struct smu_sdbp_header *part; 989 const struct smu_sdbp_header *part;
990 990
991 if (!smu) 991 if (!smu)
992 return NULL; 992 return NULL;
@@ -1003,8 +1003,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
1003 } else 1003 } else
1004 mutex_lock(&smu_part_access); 1004 mutex_lock(&smu_part_access);
1005 1005
1006 part = (struct smu_sdbp_header *)get_property(smu->of_node, 1006 part = get_property(smu->of_node, pname, size);
1007 pname, size);
1008 if (part == NULL) { 1007 if (part == NULL) {
1009 DPRINTK("trying to extract from SMU ...\n"); 1008 DPRINTK("trying to extract from SMU ...\n");
1010 part = smu_create_sdb_partition(id); 1009 part = smu_create_sdb_partition(id);
@@ -1015,7 +1014,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
1015 return part; 1014 return part;
1016} 1015}
1017 1016
1018struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size) 1017const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size)
1019{ 1018{
1020 return __smu_get_sdb_partition(id, size, 0); 1019 return __smu_get_sdb_partition(id, size, 0);
1021} 1020}
@@ -1094,7 +1093,7 @@ static ssize_t smu_write(struct file *file, const char __user *buf,
1094 pp->mode = smu_file_events; 1093 pp->mode = smu_file_events;
1095 return 0; 1094 return 0;
1096 } else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) { 1095 } else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) {
1097 struct smu_sdbp_header *part; 1096 const struct smu_sdbp_header *part;
1098 part = __smu_get_sdb_partition(hdr.cmd, NULL, 1); 1097 part = __smu_get_sdb_partition(hdr.cmd, NULL, 1);
1099 if (part == NULL) 1098 if (part == NULL)
1100 return -EINVAL; 1099 return -EINVAL;
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 7f86478bdd36..a0f30d0853ea 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -47,7 +47,7 @@ static u8 FAN_SPD_SET[2] = {0x30, 0x31};
47 47
48static u8 default_limits_local[3] = {70, 50, 70}; /* local, sensor1, sensor2 */ 48static u8 default_limits_local[3] = {70, 50, 70}; /* local, sensor1, sensor2 */
49static u8 default_limits_chip[3] = {80, 65, 80}; /* local, sensor1, sensor2 */ 49static u8 default_limits_chip[3] = {80, 65, 80}; /* local, sensor1, sensor2 */
50static char *sensor_location[3] = {NULL, NULL, NULL}; 50static const char *sensor_location[3] = {NULL, NULL, NULL};
51 51
52static int limit_adjust = 0; 52static int limit_adjust = 0;
53static int fan_speed = -1; 53static int fan_speed = -1;
@@ -553,7 +553,7 @@ static int __init
553thermostat_init(void) 553thermostat_init(void)
554{ 554{
555 struct device_node* np; 555 struct device_node* np;
556 u32 *prop; 556 const u32 *prop;
557 int i = 0, offset = 0; 557 int i = 0, offset = 0;
558 558
559 np = of_find_node_by_name(NULL, "fan"); 559 np = of_find_node_by_name(NULL, "fan");
@@ -566,13 +566,13 @@ thermostat_init(void)
566 else 566 else
567 return -ENODEV; 567 return -ENODEV;
568 568
569 prop = (u32 *)get_property(np, "hwsensor-params-version", NULL); 569 prop = get_property(np, "hwsensor-params-version", NULL);
570 printk(KERN_INFO "adt746x: version %d (%ssupported)\n", *prop, 570 printk(KERN_INFO "adt746x: version %d (%ssupported)\n", *prop,
571 (*prop == 1)?"":"un"); 571 (*prop == 1)?"":"un");
572 if (*prop != 1) 572 if (*prop != 1)
573 return -ENODEV; 573 return -ENODEV;
574 574
575 prop = (u32 *)get_property(np, "reg", NULL); 575 prop = get_property(np, "reg", NULL);
576 if (!prop) 576 if (!prop)
577 return -ENODEV; 577 return -ENODEV;
578 578
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 20bf67244e2c..d00c0c37a12e 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -660,7 +660,7 @@ static int read_eeprom(int cpu, struct mpu_data *out)
660{ 660{
661 struct device_node *np; 661 struct device_node *np;
662 char nodename[64]; 662 char nodename[64];
663 u8 *data; 663 const u8 *data;
664 int len; 664 int len;
665 665
666 /* prom.c routine for finding a node by path is a bit brain dead 666 /* prom.c routine for finding a node by path is a bit brain dead
@@ -673,7 +673,7 @@ static int read_eeprom(int cpu, struct mpu_data *out)
673 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n"); 673 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
674 return -ENODEV; 674 return -ENODEV;
675 } 675 }
676 data = (u8 *)get_property(np, "cpuid", &len); 676 data = get_property(np, "cpuid", &len);
677 if (data == NULL) { 677 if (data == NULL) {
678 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n"); 678 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
679 of_node_put(np); 679 of_node_put(np);
@@ -1336,7 +1336,7 @@ static int init_backside_state(struct backside_pid_state *state)
1336 */ 1336 */
1337 u3 = of_find_node_by_path("/u3@0,f8000000"); 1337 u3 = of_find_node_by_path("/u3@0,f8000000");
1338 if (u3 != NULL) { 1338 if (u3 != NULL) {
1339 u32 *vers = (u32 *)get_property(u3, "device-rev", NULL); 1339 const u32 *vers = get_property(u3, "device-rev", NULL);
1340 if (vers) 1340 if (vers)
1341 if (((*vers) & 0x3f) < 0x34) 1341 if (((*vers) & 0x3f) < 0x34)
1342 u3h = 0; 1342 u3h = 0;
@@ -2111,8 +2111,8 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
2111 2111
2112 while ((np = of_get_next_child(fcu_node, np)) != NULL) { 2112 while ((np = of_get_next_child(fcu_node, np)) != NULL) {
2113 int type = -1; 2113 int type = -1;
2114 char *loc; 2114 const char *loc;
2115 u32 *reg; 2115 const u32 *reg;
2116 2116
2117 DBG(" control: %s, type: %s\n", np->name, np->type); 2117 DBG(" control: %s, type: %s\n", np->name, np->type);
2118 2118
@@ -2128,8 +2128,8 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
2128 continue; 2128 continue;
2129 2129
2130 /* Lookup for a matching location */ 2130 /* Lookup for a matching location */
2131 loc = (char *)get_property(np, "location", NULL); 2131 loc = get_property(np, "location", NULL);
2132 reg = (u32 *)get_property(np, "reg", NULL); 2132 reg = get_property(np, "reg", NULL);
2133 if (loc == NULL || reg == NULL) 2133 if (loc == NULL || reg == NULL)
2134 continue; 2134 continue;
2135 DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg); 2135 DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index c7d1c290cb0c..738faab1b22c 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -484,14 +484,14 @@ struct apple_thermal_info {
484static int __init 484static int __init
485g4fan_init( void ) 485g4fan_init( void )
486{ 486{
487 struct apple_thermal_info *info; 487 const struct apple_thermal_info *info;
488 struct device_node *np; 488 struct device_node *np;
489 489
490 init_MUTEX( &x.lock ); 490 init_MUTEX( &x.lock );
491 491
492 if( !(np=of_find_node_by_name(NULL, "power-mgt")) ) 492 if( !(np=of_find_node_by_name(NULL, "power-mgt")) )
493 return -ENODEV; 493 return -ENODEV;
494 info = (struct apple_thermal_info*)get_property(np, "thermal-info", NULL); 494 info = get_property(np, "thermal-info", NULL);
495 of_node_put(np); 495 of_node_put(np);
496 496
497 if( !info || !machine_is_compatible("PowerMac3,6") ) 497 if( !info || !machine_is_compatible("PowerMac3,6") )
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 69d5452fd22f..7512d1c15207 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -123,7 +123,7 @@ int __init find_via_cuda(void)
123{ 123{
124 struct adb_request req; 124 struct adb_request req;
125 phys_addr_t taddr; 125 phys_addr_t taddr;
126 u32 *reg; 126 const u32 *reg;
127 int err; 127 int err;
128 128
129 if (vias != 0) 129 if (vias != 0)
@@ -132,7 +132,7 @@ int __init find_via_cuda(void)
132 if (vias == 0) 132 if (vias == 0)
133 return 0; 133 return 0;
134 134
135 reg = (u32 *)get_property(vias, "reg", NULL); 135 reg = get_property(vias, "reg", NULL);
136 if (reg == NULL) { 136 if (reg == NULL) {
137 printk(KERN_ERR "via-cuda: No \"reg\" property !\n"); 137 printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
138 goto fail; 138 goto fail;
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 5189d5454b1f..179af10105d9 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -120,7 +120,7 @@ static int __init via_pmu_led_init(void)
120 dt = of_find_node_by_path("/"); 120 dt = of_find_node_by_path("/");
121 if (dt == NULL) 121 if (dt == NULL)
122 return -ENODEV; 122 return -ENODEV;
123 model = (const char *)get_property(dt, "model", NULL); 123 model = get_property(dt, "model", NULL);
124 if (model == NULL) 124 if (model == NULL)
125 return -ENODEV; 125 return -ENODEV;
126 if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 && 126 if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 &&
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 14610a63f580..dda03985dcf5 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -280,7 +280,7 @@ static char *pbook_type[] = {
280int __init find_via_pmu(void) 280int __init find_via_pmu(void)
281{ 281{
282 u64 taddr; 282 u64 taddr;
283 u32 *reg; 283 const u32 *reg;
284 284
285 if (via != 0) 285 if (via != 0)
286 return 1; 286 return 1;
@@ -288,7 +288,7 @@ int __init find_via_pmu(void)
288 if (vias == NULL) 288 if (vias == NULL)
289 return 0; 289 return 0;
290 290
291 reg = (u32 *)get_property(vias, "reg", NULL); 291 reg = get_property(vias, "reg", NULL);
292 if (reg == NULL) { 292 if (reg == NULL) {
293 printk(KERN_ERR "via-pmu: No \"reg\" property !\n"); 293 printk(KERN_ERR "via-pmu: No \"reg\" property !\n");
294 goto fail; 294 goto fail;
@@ -330,7 +330,7 @@ int __init find_via_pmu(void)
330 330
331 gpiop = of_find_node_by_name(NULL, "gpio"); 331 gpiop = of_find_node_by_name(NULL, "gpio");
332 if (gpiop) { 332 if (gpiop) {
333 reg = (u32 *)get_property(gpiop, "reg", NULL); 333 reg = get_property(gpiop, "reg", NULL);
334 if (reg) 334 if (reg)
335 gaddr = of_translate_address(gpiop, reg); 335 gaddr = of_translate_address(gpiop, reg);
336 if (gaddr != OF_BAD_ADDR) 336 if (gaddr != OF_BAD_ADDR)
@@ -479,9 +479,9 @@ static int __init via_pmu_dev_init(void)
479 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 479 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
480 } else { 480 } else {
481 struct device_node* prim = find_devices("power-mgt"); 481 struct device_node* prim = find_devices("power-mgt");
482 u32 *prim_info = NULL; 482 const u32 *prim_info = NULL;
483 if (prim) 483 if (prim)
484 prim_info = (u32 *)get_property(prim, "prim-info", NULL); 484 prim_info = get_property(prim, "prim-info", NULL);
485 if (prim_info) { 485 if (prim_info) {
486 /* Other stuff here yet unknown */ 486 /* Other stuff here yet unknown */
487 pmu_battery_count = (prim_info[6] >> 16) & 0xff; 487 pmu_battery_count = (prim_info[6] >> 16) & 0xff;
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index f1df6efcbe68..2ff546e4c92f 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -396,7 +396,7 @@ static void wf_smu_sys_fans_tick(struct wf_smu_sys_fans_state *st)
396static void wf_smu_create_cpu_fans(void) 396static void wf_smu_create_cpu_fans(void)
397{ 397{
398 struct wf_cpu_pid_param pid_param; 398 struct wf_cpu_pid_param pid_param;
399 struct smu_sdbp_header *hdr; 399 const struct smu_sdbp_header *hdr;
400 struct smu_sdbp_cpupiddata *piddata; 400 struct smu_sdbp_cpupiddata *piddata;
401 struct smu_sdbp_fvt *fvt; 401 struct smu_sdbp_fvt *fvt;
402 s32 tmax, tdelta, maxpow, powadj; 402 s32 tmax, tdelta, maxpow, powadj;
@@ -702,7 +702,7 @@ static struct notifier_block wf_smu_events = {
702 702
703static int wf_init_pm(void) 703static int wf_init_pm(void)
704{ 704{
705 struct smu_sdbp_header *hdr; 705 const struct smu_sdbp_header *hdr;
706 706
707 hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); 707 hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL);
708 if (hdr != 0) { 708 if (hdr != 0) {
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 0d6372e96d32..59e9ffe37c39 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -144,7 +144,7 @@ static struct wf_smu_slots_fans_state *wf_smu_slots_fans;
144static void wf_smu_create_cpu_fans(void) 144static void wf_smu_create_cpu_fans(void)
145{ 145{
146 struct wf_cpu_pid_param pid_param; 146 struct wf_cpu_pid_param pid_param;
147 struct smu_sdbp_header *hdr; 147 const struct smu_sdbp_header *hdr;
148 struct smu_sdbp_cpupiddata *piddata; 148 struct smu_sdbp_cpupiddata *piddata;
149 struct smu_sdbp_fvt *fvt; 149 struct smu_sdbp_fvt *fvt;
150 s32 tmax, tdelta, maxpow, powadj; 150 s32 tmax, tdelta, maxpow, powadj;
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index a9e88edc0c72..bff1f372f188 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -159,14 +159,15 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
159 int pwm_fan) 159 int pwm_fan)
160{ 160{
161 struct smu_fan_control *fct; 161 struct smu_fan_control *fct;
162 s32 *v; u32 *reg; 162 const s32 *v;
163 char *l; 163 const u32 *reg;
164 const char *l;
164 165
165 fct = kmalloc(sizeof(struct smu_fan_control), GFP_KERNEL); 166 fct = kmalloc(sizeof(struct smu_fan_control), GFP_KERNEL);
166 if (fct == NULL) 167 if (fct == NULL)
167 return NULL; 168 return NULL;
168 fct->ctrl.ops = &smu_fan_ops; 169 fct->ctrl.ops = &smu_fan_ops;
169 l = (char *)get_property(node, "location", NULL); 170 l = get_property(node, "location", NULL);
170 if (l == NULL) 171 if (l == NULL)
171 goto fail; 172 goto fail;
172 173
@@ -223,17 +224,17 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
223 goto fail; 224 goto fail;
224 225
225 /* Get min & max values*/ 226 /* Get min & max values*/
226 v = (s32 *)get_property(node, "min-value", NULL); 227 v = get_property(node, "min-value", NULL);
227 if (v == NULL) 228 if (v == NULL)
228 goto fail; 229 goto fail;
229 fct->min = *v; 230 fct->min = *v;
230 v = (s32 *)get_property(node, "max-value", NULL); 231 v = get_property(node, "max-value", NULL);
231 if (v == NULL) 232 if (v == NULL)
232 goto fail; 233 goto fail;
233 fct->max = *v; 234 fct->max = *v;
234 235
235 /* Get "reg" value */ 236 /* Get "reg" value */
236 reg = (u32 *)get_property(node, "reg", NULL); 237 reg = get_property(node, "reg", NULL);
237 if (reg == NULL) 238 if (reg == NULL)
238 goto fail; 239 goto fail;
239 fct->reg = *reg; 240 fct->reg = *reg;
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index e295a07a1ebc..aceb61d9fbc8 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -233,15 +233,15 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
233{ 233{
234 struct wf_sat *sat; 234 struct wf_sat *sat;
235 struct wf_sat_sensor *sens; 235 struct wf_sat_sensor *sens;
236 u32 *reg; 236 const u32 *reg;
237 char *loc, *type; 237 const char *loc, *type;
238 u8 addr, chip, core; 238 u8 addr, chip, core;
239 struct device_node *child; 239 struct device_node *child;
240 int shift, cpu, index; 240 int shift, cpu, index;
241 char *name; 241 char *name;
242 int vsens[2], isens[2]; 242 int vsens[2], isens[2];
243 243
244 reg = (u32 *) get_property(dev, "reg", NULL); 244 reg = get_property(dev, "reg", NULL);
245 if (reg == NULL) 245 if (reg == NULL)
246 return; 246 return;
247 addr = *reg; 247 addr = *reg;
@@ -268,7 +268,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
268 isens[0] = isens[1] = -1; 268 isens[0] = isens[1] = -1;
269 child = NULL; 269 child = NULL;
270 while ((child = of_get_next_child(dev, child)) != NULL) { 270 while ((child = of_get_next_child(dev, child)) != NULL) {
271 reg = (u32 *) get_property(child, "reg", NULL); 271 reg = get_property(child, "reg", NULL);
272 type = get_property(child, "device_type", NULL); 272 type = get_property(child, "device_type", NULL);
273 loc = get_property(child, "location", NULL); 273 loc = get_property(child, "location", NULL);
274 if (reg == NULL || loc == NULL) 274 if (reg == NULL || loc == NULL)
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index bed25dcf8a1e..defe9922ebd1 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -198,14 +198,14 @@ static struct wf_sensor_ops smu_slotspow_ops = {
198static struct smu_ad_sensor *smu_ads_create(struct device_node *node) 198static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
199{ 199{
200 struct smu_ad_sensor *ads; 200 struct smu_ad_sensor *ads;
201 char *c, *l; 201 const char *c, *l;
202 u32 *v; 202 const u32 *v;
203 203
204 ads = kmalloc(sizeof(struct smu_ad_sensor), GFP_KERNEL); 204 ads = kmalloc(sizeof(struct smu_ad_sensor), GFP_KERNEL);
205 if (ads == NULL) 205 if (ads == NULL)
206 return NULL; 206 return NULL;
207 c = (char *)get_property(node, "device_type", NULL); 207 c = get_property(node, "device_type", NULL);
208 l = (char *)get_property(node, "location", NULL); 208 l = get_property(node, "location", NULL);
209 if (c == NULL || l == NULL) 209 if (c == NULL || l == NULL)
210 goto fail; 210 goto fail;
211 211
@@ -255,7 +255,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
255 } else 255 } else
256 goto fail; 256 goto fail;
257 257
258 v = (u32 *)get_property(node, "reg", NULL); 258 v = get_property(node, "reg", NULL);
259 if (v == NULL) 259 if (v == NULL)
260 goto fail; 260 goto fail;
261 ads->reg = *v; 261 ads->reg = *v;
@@ -382,7 +382,7 @@ smu_cpu_power_create(struct wf_sensor *volts, struct wf_sensor *amps)
382 382
383static void smu_fetch_param_partitions(void) 383static void smu_fetch_param_partitions(void)
384{ 384{
385 struct smu_sdbp_header *hdr; 385 const struct smu_sdbp_header *hdr;
386 386
387 /* Get CPU voltage/current/power calibration data */ 387 /* Get CPU voltage/current/power calibration data */
388 hdr = smu_get_sdb_partition(SMU_SDB_CPUVCP_ID, NULL); 388 hdr = smu_get_sdb_partition(SMU_SDB_CPUVCP_ID, NULL);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6022ed12a795..bdbd34993a80 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -5,6 +5,7 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include <linux/err.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/init.h> 10#include <linux/init.h>
10#include <linux/kernel.h> 11#include <linux/kernel.h>
@@ -78,11 +79,13 @@ struct crypt_config {
78 */ 79 */
79 struct crypt_iv_operations *iv_gen_ops; 80 struct crypt_iv_operations *iv_gen_ops;
80 char *iv_mode; 81 char *iv_mode;
81 void *iv_gen_private; 82 struct crypto_cipher *iv_gen_private;
82 sector_t iv_offset; 83 sector_t iv_offset;
83 unsigned int iv_size; 84 unsigned int iv_size;
84 85
85 struct crypto_tfm *tfm; 86 char cipher[CRYPTO_MAX_ALG_NAME];
87 char chainmode[CRYPTO_MAX_ALG_NAME];
88 struct crypto_blkcipher *tfm;
86 unsigned int key_size; 89 unsigned int key_size;
87 u8 key[0]; 90 u8 key[0];
88}; 91};
@@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool;
96/* 99/*
97 * Different IV generation algorithms: 100 * Different IV generation algorithms:
98 * 101 *
99 * plain: the initial vector is the 32-bit low-endian version of the sector 102 * plain: the initial vector is the 32-bit little-endian version of the sector
100 * number, padded with zeros if necessary. 103 * number, padded with zeros if necessary.
101 * 104 *
102 * ess_iv: "encrypted sector|salt initial vector", the sector number is 105 * essiv: "encrypted sector|salt initial vector", the sector number is
103 * encrypted with the bulk cipher using a salt as key. The salt 106 * encrypted with the bulk cipher using a salt as key. The salt
104 * should be derived from the bulk cipher's key via hashing. 107 * should be derived from the bulk cipher's key via hashing.
105 * 108 *
106 * plumb: unimplemented, see: 109 * plumb: unimplemented, see:
107 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 110 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
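As an aside for readers following the dm-crypt hunks below, here is a minimal sketch of the "plain" IV scheme described in the comment above: the 32-bit sector number is stored little-endian at the start of the IV buffer and the remainder is zero padding. The helper and parameter names are illustrative only and are not part of the patch.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Minimal sketch of the "plain" IV scheme: the 32-bit sector number,
 * little-endian, zero-padded to the cipher's IV size.  Names are
 * illustrative, not taken from dm-crypt itself. */
static void plain_iv_sketch(u8 *iv, unsigned int iv_size, sector_t sector)
{
	memset(iv, 0, iv_size);			/* zero padding */
	*(__le32 *)iv = cpu_to_le32(sector & 0xffffffff);
}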
@@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
118static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, 121static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
119 const char *opts) 122 const char *opts)
120{ 123{
121 struct crypto_tfm *essiv_tfm; 124 struct crypto_cipher *essiv_tfm;
122 struct crypto_tfm *hash_tfm; 125 struct crypto_hash *hash_tfm;
126 struct hash_desc desc;
123 struct scatterlist sg; 127 struct scatterlist sg;
124 unsigned int saltsize; 128 unsigned int saltsize;
125 u8 *salt; 129 u8 *salt;
130 int err;
126 131
127 if (opts == NULL) { 132 if (opts == NULL) {
128 ti->error = "Digest algorithm missing for ESSIV mode"; 133 ti->error = "Digest algorithm missing for ESSIV mode";
@@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
130 } 135 }
131 136
132 /* Hash the cipher key with the given hash algorithm */ 137 /* Hash the cipher key with the given hash algorithm */
133 hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); 138 hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
134 if (hash_tfm == NULL) { 139 if (IS_ERR(hash_tfm)) {
135 ti->error = "Error initializing ESSIV hash"; 140 ti->error = "Error initializing ESSIV hash";
136 return -EINVAL; 141 return PTR_ERR(hash_tfm);
137 } 142 }
138 143
139 if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { 144 saltsize = crypto_hash_digestsize(hash_tfm);
140 ti->error = "Expected digest algorithm for ESSIV hash";
141 crypto_free_tfm(hash_tfm);
142 return -EINVAL;
143 }
144
145 saltsize = crypto_tfm_alg_digestsize(hash_tfm);
146 salt = kmalloc(saltsize, GFP_KERNEL); 145 salt = kmalloc(saltsize, GFP_KERNEL);
147 if (salt == NULL) { 146 if (salt == NULL) {
148 ti->error = "Error kmallocing salt storage in ESSIV"; 147 ti->error = "Error kmallocing salt storage in ESSIV";
149 crypto_free_tfm(hash_tfm); 148 crypto_free_hash(hash_tfm);
150 return -ENOMEM; 149 return -ENOMEM;
151 } 150 }
152 151
153 sg_set_buf(&sg, cc->key, cc->key_size); 152 sg_set_buf(&sg, cc->key, cc->key_size);
154 crypto_digest_digest(hash_tfm, &sg, 1, salt); 153 desc.tfm = hash_tfm;
155 crypto_free_tfm(hash_tfm); 154 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
155 err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
156 crypto_free_hash(hash_tfm);
157
158 if (err) {
159 ti->error = "Error calculating hash in ESSIV";
160 return err;
161 }
156 162
157 /* Setup the essiv_tfm with the given salt */ 163 /* Setup the essiv_tfm with the given salt */
158 essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), 164 essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
159 CRYPTO_TFM_MODE_ECB | 165 if (IS_ERR(essiv_tfm)) {
160 CRYPTO_TFM_REQ_MAY_SLEEP);
161 if (essiv_tfm == NULL) {
162 ti->error = "Error allocating crypto tfm for ESSIV"; 166 ti->error = "Error allocating crypto tfm for ESSIV";
163 kfree(salt); 167 kfree(salt);
164 return -EINVAL; 168 return PTR_ERR(essiv_tfm);
165 } 169 }
166 if (crypto_tfm_alg_blocksize(essiv_tfm) 170 if (crypto_cipher_blocksize(essiv_tfm) !=
167 != crypto_tfm_alg_ivsize(cc->tfm)) { 171 crypto_blkcipher_ivsize(cc->tfm)) {
168 ti->error = "Block size of ESSIV cipher does " 172 ti->error = "Block size of ESSIV cipher does "
169 "not match IV size of block cipher"; 173 "not match IV size of block cipher";
170 crypto_free_tfm(essiv_tfm); 174 crypto_free_cipher(essiv_tfm);
171 kfree(salt); 175 kfree(salt);
172 return -EINVAL; 176 return -EINVAL;
173 } 177 }
174 if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { 178 err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
179 if (err) {
175 ti->error = "Failed to set key for ESSIV cipher"; 180 ti->error = "Failed to set key for ESSIV cipher";
176 crypto_free_tfm(essiv_tfm); 181 crypto_free_cipher(essiv_tfm);
177 kfree(salt); 182 kfree(salt);
178 return -EINVAL; 183 return err;
179 } 184 }
180 kfree(salt); 185 kfree(salt);
181 186
182 cc->iv_gen_private = (void *)essiv_tfm; 187 cc->iv_gen_private = essiv_tfm;
183 return 0; 188 return 0;
184} 189}
185 190
186static void crypt_iv_essiv_dtr(struct crypt_config *cc) 191static void crypt_iv_essiv_dtr(struct crypt_config *cc)
187{ 192{
188 crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private); 193 crypto_free_cipher(cc->iv_gen_private);
189 cc->iv_gen_private = NULL; 194 cc->iv_gen_private = NULL;
190} 195}
191 196
192static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) 197static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
193{ 198{
194 struct scatterlist sg;
195
196 memset(iv, 0, cc->iv_size); 199 memset(iv, 0, cc->iv_size);
197 *(u64 *)iv = cpu_to_le64(sector); 200 *(u64 *)iv = cpu_to_le64(sector);
198 201 crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
199 sg_set_buf(&sg, iv, cc->iv_size);
200 crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private,
201 &sg, &sg, cc->iv_size);
202
203 return 0; 202 return 0;
204} 203}
205 204
@@ -220,6 +219,11 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
220 int write, sector_t sector) 219 int write, sector_t sector)
221{ 220{
222 u8 iv[cc->iv_size]; 221 u8 iv[cc->iv_size];
222 struct blkcipher_desc desc = {
223 .tfm = cc->tfm,
224 .info = iv,
225 .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
226 };
223 int r; 227 int r;
224 228
225 if (cc->iv_gen_ops) { 229 if (cc->iv_gen_ops) {
@@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
228 return r; 232 return r;
229 233
230 if (write) 234 if (write)
231 r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); 235 r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
232 else 236 else
233 r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); 237 r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
234 } else { 238 } else {
235 if (write) 239 if (write)
236 r = crypto_cipher_encrypt(cc->tfm, out, in, length); 240 r = crypto_blkcipher_encrypt(&desc, out, in, length);
237 else 241 else
238 r = crypto_cipher_decrypt(cc->tfm, out, in, length); 242 r = crypto_blkcipher_decrypt(&desc, out, in, length);
239 } 243 }
240 244
241 return r; 245 return r;
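The hunk above converts dm-crypt from the old crypto_cipher calls to the blkcipher interface, where the IV travels in a blkcipher_desc instead of being passed to every call. A condensed, hedged sketch of that call sequence, assuming a "cbc(aes)" transform and illustrative buffer and key names, looks roughly like this:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hedged sketch of the blkcipher call sequence used above; buffer and
 * key names are illustrative, not taken from dm-crypt. */
static int blkcipher_sketch(u8 *key, unsigned int keylen,
			    u8 *iv, struct scatterlist *sg, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int ret;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	desc.tfm = tfm;
	desc.info = iv;				/* IV carried in the descriptor */
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
out:
	crypto_free_blkcipher(tfm);
	return ret;
}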
@@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
510static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 514static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
511{ 515{
512 struct crypt_config *cc; 516 struct crypt_config *cc;
513 struct crypto_tfm *tfm; 517 struct crypto_blkcipher *tfm;
514 char *tmp; 518 char *tmp;
515 char *cipher; 519 char *cipher;
516 char *chainmode; 520 char *chainmode;
517 char *ivmode; 521 char *ivmode;
518 char *ivopts; 522 char *ivopts;
519 unsigned int crypto_flags;
520 unsigned int key_size; 523 unsigned int key_size;
521 unsigned long long tmpll; 524 unsigned long long tmpll;
522 525
@@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
556 ivmode = "plain"; 559 ivmode = "plain";
557 } 560 }
558 561
559 /* Choose crypto_flags according to chainmode */ 562 if (strcmp(chainmode, "ecb") && !ivmode) {
560 if (strcmp(chainmode, "cbc") == 0) 563 ti->error = "This chaining mode requires an IV mechanism";
561 crypto_flags = CRYPTO_TFM_MODE_CBC;
562 else if (strcmp(chainmode, "ecb") == 0)
563 crypto_flags = CRYPTO_TFM_MODE_ECB;
564 else {
565 ti->error = "Unknown chaining mode";
566 goto bad1; 564 goto bad1;
567 } 565 }
568 566
569 if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { 567 if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
570 ti->error = "This chaining mode requires an IV mechanism"; 568 cipher) >= CRYPTO_MAX_ALG_NAME) {
569 ti->error = "Chain mode + cipher name is too long";
571 goto bad1; 570 goto bad1;
572 } 571 }
573 572
574 tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP); 573 tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
575 if (!tfm) { 574 if (IS_ERR(tfm)) {
576 ti->error = "Error allocating crypto tfm"; 575 ti->error = "Error allocating crypto tfm";
577 goto bad1; 576 goto bad1;
578 } 577 }
579 if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
580 ti->error = "Expected cipher algorithm";
581 goto bad2;
582 }
583 578
579 strcpy(cc->cipher, cipher);
580 strcpy(cc->chainmode, chainmode);
584 cc->tfm = tfm; 581 cc->tfm = tfm;
585 582
586 /* 583 /*
@@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
603 cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) 600 cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
604 goto bad2; 601 goto bad2;
605 602
606 if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv) 603 cc->iv_size = crypto_blkcipher_ivsize(tfm);
604 if (cc->iv_size)
607 /* at least a 64 bit sector number should fit in our buffer */ 605 /* at least a 64 bit sector number should fit in our buffer */
608 cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), 606 cc->iv_size = max(cc->iv_size,
609 (unsigned int)(sizeof(u64) / sizeof(u8))); 607 (unsigned int)(sizeof(u64) / sizeof(u8)));
610 else { 608 else {
611 cc->iv_size = 0;
612 if (cc->iv_gen_ops) { 609 if (cc->iv_gen_ops) {
613 DMWARN("Selected cipher does not support IVs"); 610 DMWARN("Selected cipher does not support IVs");
614 if (cc->iv_gen_ops->dtr) 611 if (cc->iv_gen_ops->dtr)
@@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
629 goto bad4; 626 goto bad4;
630 } 627 }
631 628
632 if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { 629 if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
633 ti->error = "Error setting key"; 630 ti->error = "Error setting key";
634 goto bad5; 631 goto bad5;
635 } 632 }
@@ -675,7 +672,7 @@ bad3:
675 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 672 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
676 cc->iv_gen_ops->dtr(cc); 673 cc->iv_gen_ops->dtr(cc);
677bad2: 674bad2:
678 crypto_free_tfm(tfm); 675 crypto_free_blkcipher(tfm);
679bad1: 676bad1:
680 /* Must zero key material before freeing */ 677 /* Must zero key material before freeing */
681 memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); 678 memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
@@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti)
693 kfree(cc->iv_mode); 690 kfree(cc->iv_mode);
694 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 691 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
695 cc->iv_gen_ops->dtr(cc); 692 cc->iv_gen_ops->dtr(cc);
696 crypto_free_tfm(cc->tfm); 693 crypto_free_blkcipher(cc->tfm);
697 dm_put_device(ti, cc->dev); 694 dm_put_device(ti, cc->dev);
698 695
699 /* Must zero key material before freeing */ 696 /* Must zero key material before freeing */
@@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
858 break; 855 break;
859 856
860 case STATUSTYPE_TABLE: 857 case STATUSTYPE_TABLE:
861 cipher = crypto_tfm_alg_name(cc->tfm); 858 cipher = crypto_blkcipher_name(cc->tfm);
862 859
863 switch(cc->tfm->crt_cipher.cit_mode) { 860 chainmode = cc->chainmode;
864 case CRYPTO_TFM_MODE_CBC:
865 chainmode = "cbc";
866 break;
867 case CRYPTO_TFM_MODE_ECB:
868 chainmode = "ecb";
869 break;
870 default:
871 BUG();
872 }
873 861
874 if (cc->iv_mode) 862 if (cc->iv_mode)
875 DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); 863 DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 85696f34c310..e57bb035a021 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -162,7 +162,13 @@ static struct fc_function_template mptfc_transport_functions = {
162 .show_starget_port_id = 1, 162 .show_starget_port_id = 1,
163 .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo, 163 .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo,
164 .show_rport_dev_loss_tmo = 1, 164 .show_rport_dev_loss_tmo = 1,
165 165 .show_host_supported_speeds = 1,
166 .show_host_maxframe_size = 1,
167 .show_host_speed = 1,
168 .show_host_fabric_name = 1,
169 .show_host_port_type = 1,
170 .show_host_port_state = 1,
171 .show_host_symbolic_name = 1,
166}; 172};
167 173
168static void 174static void
@@ -839,33 +845,95 @@ mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc)
839static void 845static void
840mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) 846mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
841{ 847{
842 unsigned class = 0, cos = 0; 848 unsigned class = 0;
849 unsigned cos = 0;
850 unsigned speed;
851 unsigned port_type;
852 unsigned port_state;
853 FCPortPage0_t *pp0;
854 struct Scsi_Host *sh;
855 char *sn;
843 856
844 /* don't know what to do as only one scsi (fc) host was allocated */ 857 /* don't know what to do as only one scsi (fc) host was allocated */
845 if (portnum != 0) 858 if (portnum != 0)
846 return; 859 return;
847 860
848 class = ioc->fc_port_page0[portnum].SupportedServiceClass; 861 pp0 = &ioc->fc_port_page0[portnum];
862 sh = ioc->sh;
863
864 sn = fc_host_symbolic_name(sh);
865 snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh",
866 ioc->prod_name,
867 MPT_FW_REV_MAGIC_ID_STRING,
868 ioc->facts.FWVersion.Word);
869
870 fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN;
871
872 fc_host_maxframe_size(sh) = pp0->MaxFrameSize;
873
874 fc_host_node_name(sh) =
875 (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
876
877 fc_host_port_name(sh) =
878 (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low;
879
880 fc_host_port_id(sh) = pp0->PortIdentifier;
881
882 class = pp0->SupportedServiceClass;
849 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1) 883 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1)
850 cos |= FC_COS_CLASS1; 884 cos |= FC_COS_CLASS1;
851 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2) 885 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2)
852 cos |= FC_COS_CLASS2; 886 cos |= FC_COS_CLASS2;
853 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3) 887 if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3)
854 cos |= FC_COS_CLASS3; 888 cos |= FC_COS_CLASS3;
889 fc_host_supported_classes(sh) = cos;
890
891 if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT)
892 speed = FC_PORTSPEED_1GBIT;
893 else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT)
894 speed = FC_PORTSPEED_2GBIT;
895 else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT)
896 speed = FC_PORTSPEED_4GBIT;
897 else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT)
898 speed = FC_PORTSPEED_10GBIT;
899 else
900 speed = FC_PORTSPEED_UNKNOWN;
901 fc_host_speed(sh) = speed;
902
903 speed = 0;
904 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED)
905 speed |= FC_PORTSPEED_1GBIT;
906 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED)
907 speed |= FC_PORTSPEED_2GBIT;
908 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED)
909 speed |= FC_PORTSPEED_4GBIT;
910 if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED)
911 speed |= FC_PORTSPEED_10GBIT;
912 fc_host_supported_speeds(sh) = speed;
913
914 port_state = FC_PORTSTATE_UNKNOWN;
915 if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE)
916 port_state = FC_PORTSTATE_ONLINE;
917 else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE)
918 port_state = FC_PORTSTATE_LINKDOWN;
919 fc_host_port_state(sh) = port_state;
920
921 port_type = FC_PORTTYPE_UNKNOWN;
922 if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT)
923 port_type = FC_PORTTYPE_PTP;
924 else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP)
925 port_type = FC_PORTTYPE_LPORT;
926 else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP)
927 port_type = FC_PORTTYPE_NLPORT;
928 else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT)
929 port_type = FC_PORTTYPE_NPORT;
930 fc_host_port_type(sh) = port_type;
931
932 fc_host_fabric_name(sh) =
933 (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ?
934 (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low :
935 (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
855 936
856 fc_host_node_name(ioc->sh) =
857 (u64)ioc->fc_port_page0[portnum].WWNN.High << 32
858 | (u64)ioc->fc_port_page0[portnum].WWNN.Low;
859
860 fc_host_port_name(ioc->sh) =
861 (u64)ioc->fc_port_page0[portnum].WWPN.High << 32
862 | (u64)ioc->fc_port_page0[portnum].WWPN.Low;
863
864 fc_host_port_id(ioc->sh) = ioc->fc_port_page0[portnum].PortIdentifier;
865
866 fc_host_supported_classes(ioc->sh) = cos;
867
868 fc_host_tgtid_bind_type(ioc->sh) = FC_TGTID_BIND_BY_WWPN;
869} 937}
870 938
871static void 939static void
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f66f2203143a..b752a479f6db 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -852,6 +852,10 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
852 dma_addr_t dma_handle; 852 dma_addr_t dma_handle;
853 int error; 853 int error;
854 854
855 /* FIXME: only have link errors on local phys */
856 if (!scsi_is_sas_phy_local(phy))
857 return -EINVAL;
858
855 hdr.PageVersion = MPI_SASPHY1_PAGEVERSION; 859 hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
856 hdr.ExtPageLength = 0; 860 hdr.ExtPageLength = 0;
857 hdr.PageNumber = 1 /* page number 1*/; 861 hdr.PageNumber = 1 /* page number 1*/;
@@ -924,6 +928,10 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
924 unsigned long timeleft; 928 unsigned long timeleft;
925 int error = -ERESTARTSYS; 929 int error = -ERESTARTSYS;
926 930
931 /* FIXME: fusion doesn't allow non-local phy reset */
932 if (!scsi_is_sas_phy_local(phy))
933 return -EINVAL;
934
927 /* not implemented for expanders */ 935 /* not implemented for expanders */
928 if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP) 936 if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
929 return -ENXIO; 937 return -ENXIO;
@@ -1570,9 +1578,6 @@ static int mptsas_probe_one_phy(struct device *dev,
1570 1578
1571 if (!phy_info->phy) { 1579 if (!phy_info->phy) {
1572 1580
1573 if (local)
1574 phy->local_attached = 1;
1575
1576 error = sas_phy_add(phy); 1581 error = sas_phy_add(phy);
1577 if (error) { 1582 if (error) {
1578 sas_phy_free(phy); 1583 sas_phy_free(phy);
@@ -1642,14 +1647,18 @@ static int mptsas_probe_one_phy(struct device *dev,
1642 1647
1643 for (i = 0; i < port_info->num_phys; i++) 1648 for (i = 0; i < port_info->num_phys; i++)
1644 if (port_info->phy_info[i].identify.sas_address == 1649 if (port_info->phy_info[i].identify.sas_address ==
1645 identify.sas_address) 1650 identify.sas_address) {
1651 sas_port_mark_backlink(port);
1646 goto out; 1652 goto out;
1653 }
1647 1654
1648 } else if (scsi_is_sas_rphy(parent)) { 1655 } else if (scsi_is_sas_rphy(parent)) {
1649 struct sas_rphy *parent_rphy = dev_to_rphy(parent); 1656 struct sas_rphy *parent_rphy = dev_to_rphy(parent);
1650 if (identify.sas_address == 1657 if (identify.sas_address ==
1651 parent_rphy->identify.sas_address) 1658 parent_rphy->identify.sas_address) {
1659 sas_port_mark_backlink(port);
1652 goto out; 1660 goto out;
1661 }
1653 } 1662 }
1654 1663
1655 switch (identify.device_type) { 1664 switch (identify.device_type) {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 1344ad7a4b14..a03e862851db 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -101,7 +101,7 @@ config MTD_REDBOOT_PARTS_READONLY
101 101
102config MTD_CMDLINE_PARTS 102config MTD_CMDLINE_PARTS
103 bool "Command line partition table parsing" 103 bool "Command line partition table parsing"
104 depends on MTD_PARTITIONS = "y" 104 depends on MTD_PARTITIONS = "y" && MTD = "y"
105 ---help--- 105 ---help---
106 Allow generic configuration of the MTD partition tables via the kernel 106 Allow generic configuration of the MTD partition tables via the kernel
107 command line. Multiple flash resources are supported for hardware where 107 command line. Multiple flash resources are supported for hardware where
@@ -263,6 +263,14 @@ config RFD_FTL
263 263
264 http://www.gensw.com/pages/prod/bios/rfd.htm 264 http://www.gensw.com/pages/prod/bios/rfd.htm
265 265
266config SSFDC
267 tristate "NAND SSFDC (SmartMedia) read only translation layer"
268 depends on MTD
269 default n
270 help
271 This enables read-only access to SmartMedia-formatted NAND
272 flash. You can mount it with the FAT file system.
273
266source "drivers/mtd/chips/Kconfig" 274source "drivers/mtd/chips/Kconfig"
267 275
268source "drivers/mtd/maps/Kconfig" 276source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index fc9374407c2b..1e36b9aed98b 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs.o
21obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o 21obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o
22obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o 22obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o
23obj-$(CONFIG_RFD_FTL) += rfd_ftl.o mtd_blkdevs.o 23obj-$(CONFIG_RFD_FTL) += rfd_ftl.o mtd_blkdevs.o
24obj-$(CONFIG_SSFDC) += ssfdc.o mtd_blkdevs.o
24 25
25nftl-objs := nftlcore.o nftlmount.o 26nftl-objs := nftlcore.o nftlmount.o
26inftl-objs := inftlcore.o inftlmount.o 27inftl-objs := inftlcore.o inftlmount.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 39edb8250fbc..7ea49a0d5ec3 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -908,7 +908,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
908 908
909static int __xipram xip_wait_for_operation( 909static int __xipram xip_wait_for_operation(
910 struct map_info *map, struct flchip *chip, 910 struct map_info *map, struct flchip *chip,
911 unsigned long adr, int *chip_op_time ) 911 unsigned long adr, unsigned int chip_op_time )
912{ 912{
913 struct cfi_private *cfi = map->fldrv_priv; 913 struct cfi_private *cfi = map->fldrv_priv;
914 struct cfi_pri_intelext *cfip = cfi->cmdset_priv; 914 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -917,7 +917,7 @@ static int __xipram xip_wait_for_operation(
917 flstate_t oldstate, newstate; 917 flstate_t oldstate, newstate;
918 918
919 start = xip_currtime(); 919 start = xip_currtime();
920 usec = *chip_op_time * 8; 920 usec = chip_op_time * 8;
921 if (usec == 0) 921 if (usec == 0)
922 usec = 500000; 922 usec = 500000;
923 done = 0; 923 done = 0;
@@ -1027,8 +1027,8 @@ static int __xipram xip_wait_for_operation(
1027#define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1027#define XIP_INVAL_CACHED_RANGE(map, from, size) \
1028 INVALIDATE_CACHED_RANGE(map, from, size) 1028 INVALIDATE_CACHED_RANGE(map, from, size)
1029 1029
1030#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \ 1030#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
1031 xip_wait_for_operation(map, chip, cmd_adr, p_usec) 1031 xip_wait_for_operation(map, chip, cmd_adr, usec)
1032 1032
1033#else 1033#else
1034 1034
@@ -1040,64 +1040,64 @@ static int __xipram xip_wait_for_operation(
1040static int inval_cache_and_wait_for_operation( 1040static int inval_cache_and_wait_for_operation(
1041 struct map_info *map, struct flchip *chip, 1041 struct map_info *map, struct flchip *chip,
1042 unsigned long cmd_adr, unsigned long inval_adr, int inval_len, 1042 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1043 int *chip_op_time ) 1043 unsigned int chip_op_time)
1044{ 1044{
1045 struct cfi_private *cfi = map->fldrv_priv; 1045 struct cfi_private *cfi = map->fldrv_priv;
1046 map_word status, status_OK = CMD(0x80); 1046 map_word status, status_OK = CMD(0x80);
1047 int z, chip_state = chip->state; 1047 int chip_state = chip->state;
1048 unsigned long timeo; 1048 unsigned int timeo, sleep_time;
1049 1049
1050 spin_unlock(chip->mutex); 1050 spin_unlock(chip->mutex);
1051 if (inval_len) 1051 if (inval_len)
1052 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); 1052 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1053 if (*chip_op_time)
1054 cfi_udelay(*chip_op_time);
1055 spin_lock(chip->mutex); 1053 spin_lock(chip->mutex);
1056 1054
1057 timeo = *chip_op_time * 8 * HZ / 1000000; 1055 /* set our timeout to 8 times the expected delay */
1058 if (timeo < HZ/2) 1056 timeo = chip_op_time * 8;
1059 timeo = HZ/2; 1057 if (!timeo)
1060 timeo += jiffies; 1058 timeo = 500000;
1059 sleep_time = chip_op_time / 2;
1061 1060
1062 z = 0;
1063 for (;;) { 1061 for (;;) {
1064 if (chip->state != chip_state) {
1065 /* Someone's suspended the operation: sleep */
1066 DECLARE_WAITQUEUE(wait, current);
1067
1068 set_current_state(TASK_UNINTERRUPTIBLE);
1069 add_wait_queue(&chip->wq, &wait);
1070 spin_unlock(chip->mutex);
1071 schedule();
1072 remove_wait_queue(&chip->wq, &wait);
1073 timeo = jiffies + (HZ / 2); /* FIXME */
1074 spin_lock(chip->mutex);
1075 continue;
1076 }
1077
1078 status = map_read(map, cmd_adr); 1062 status = map_read(map, cmd_adr);
1079 if (map_word_andequal(map, status, status_OK, status_OK)) 1063 if (map_word_andequal(map, status, status_OK, status_OK))
1080 break; 1064 break;
1081 1065
1082 /* OK Still waiting */ 1066 if (!timeo) {
1083 if (time_after(jiffies, timeo)) {
1084 map_write(map, CMD(0x70), cmd_adr); 1067 map_write(map, CMD(0x70), cmd_adr);
1085 chip->state = FL_STATUS; 1068 chip->state = FL_STATUS;
1086 return -ETIME; 1069 return -ETIME;
1087 } 1070 }
1088 1071
1089 /* Latency issues. Drop the lock, wait a while and retry */ 1072 /* OK Still waiting. Drop the lock, wait a while and retry. */
1090 z++;
1091 spin_unlock(chip->mutex); 1073 spin_unlock(chip->mutex);
1092 cfi_udelay(1); 1074 if (sleep_time >= 1000000/HZ) {
1075 /*
1076 * Half of the normal delay still remaining
1077 * can be performed with a sleeping delay instead
1078 * of busy waiting.
1079 */
1080 msleep(sleep_time/1000);
1081 timeo -= sleep_time;
1082 sleep_time = 1000000/HZ;
1083 } else {
1084 udelay(1);
1085 cond_resched();
1086 timeo--;
1087 }
1093 spin_lock(chip->mutex); 1088 spin_lock(chip->mutex);
1094 }
1095 1089
1096 if (!z) { 1090 if (chip->state != chip_state) {
1097 if (!--(*chip_op_time)) 1091 /* Someone's suspended the operation: sleep */
1098 *chip_op_time = 1; 1092 DECLARE_WAITQUEUE(wait, current);
1099 } else if (z > 1) 1093 set_current_state(TASK_UNINTERRUPTIBLE);
1100 ++(*chip_op_time); 1094 add_wait_queue(&chip->wq, &wait);
1095 spin_unlock(chip->mutex);
1096 schedule();
1097 remove_wait_queue(&chip->wq, &wait);
1098 spin_lock(chip->mutex);
1099 }
1100 }
1101 1101
1102 /* Done and happy. */ 1102 /* Done and happy. */
1103 chip->state = FL_STATUS; 1103 chip->state = FL_STATUS;
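The rewritten wait loop above replaces a fixed busy-wait with a hybrid strategy: sleep while a large share of the expected operation time remains, then fall back to short busy-waits near completion. A simplified, driver-agnostic sketch of that pattern is shown below; done() is a hypothetical stand-in for the flash status check and is not part of the patch.

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hedged sketch of the hybrid wait used above: sleep while a large
 * share of the expected delay remains, busy-wait near the end. */
static int hybrid_wait_sketch(unsigned int expected_us, bool (*done)(void))
{
	unsigned int timeo = expected_us ? expected_us * 8 : 500000;
	unsigned int sleep_us = expected_us / 2;

	while (!done()) {
		if (!timeo)
			return -ETIME;
		if (sleep_us >= 1000000 / HZ) {
			/* enough time left for a sleeping delay */
			msleep(sleep_us / 1000);
			timeo = (timeo > sleep_us) ? timeo - sleep_us : 0;
			sleep_us = 1000000 / HZ;
		} else {
			/* close to completion: short busy wait */
			udelay(1);
			cond_resched();
			timeo--;
		}
	}
	return 0;
}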
@@ -1107,8 +1107,7 @@ static int inval_cache_and_wait_for_operation(
1107#endif 1107#endif
1108 1108
1109#define WAIT_TIMEOUT(map, chip, adr, udelay) \ 1109#define WAIT_TIMEOUT(map, chip, adr, udelay) \
1110 ({ int __udelay = (udelay); \ 1110 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
1111 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
1112 1111
1113 1112
1114static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) 1113static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1332,7 +1331,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1332 1331
1333 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1332 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1334 adr, map_bankwidth(map), 1333 adr, map_bankwidth(map),
1335 &chip->word_write_time); 1334 chip->word_write_time);
1336 if (ret) { 1335 if (ret) {
1337 xip_enable(map, chip, adr); 1336 xip_enable(map, chip, adr);
1338 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); 1337 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1569,7 +1568,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1569 1568
1570 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, 1569 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1571 adr, len, 1570 adr, len,
1572 &chip->buffer_write_time); 1571 chip->buffer_write_time);
1573 if (ret) { 1572 if (ret) {
1574 map_write(map, CMD(0x70), cmd_adr); 1573 map_write(map, CMD(0x70), cmd_adr);
1575 chip->state = FL_STATUS; 1574 chip->state = FL_STATUS;
@@ -1704,7 +1703,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1704 1703
1705 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1704 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1706 adr, len, 1705 adr, len,
1707 &chip->erase_time); 1706 chip->erase_time);
1708 if (ret) { 1707 if (ret) {
1709 map_write(map, CMD(0x70), adr); 1708 map_write(map, CMD(0x70), adr);
1710 chip->state = FL_STATUS; 1709 chip->state = FL_STATUS;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 9885726a16e4..702ae4cd8691 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -45,9 +45,11 @@
45#define MAX_WORD_RETRIES 3 45#define MAX_WORD_RETRIES 3
46 46
47#define MANUFACTURER_AMD 0x0001 47#define MANUFACTURER_AMD 0x0001
48#define MANUFACTURER_ATMEL 0x001F
48#define MANUFACTURER_SST 0x00BF 49#define MANUFACTURER_SST 0x00BF
49#define SST49LF004B 0x0060 50#define SST49LF004B 0x0060
50#define SST49LF008A 0x005a 51#define SST49LF008A 0x005a
52#define AT49BV6416 0x00d6
51 53
52static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 54static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 55static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -68,6 +70,9 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
68static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 70static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
69#include "fwh_lock.h" 71#include "fwh_lock.h"
70 72
73static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
74static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
75
71static struct mtd_chip_driver cfi_amdstd_chipdrv = { 76static struct mtd_chip_driver cfi_amdstd_chipdrv = {
72 .probe = NULL, /* Not usable directly */ 77 .probe = NULL, /* Not usable directly */
73 .destroy = cfi_amdstd_destroy, 78 .destroy = cfi_amdstd_destroy,
@@ -161,6 +166,26 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
161 } 166 }
162} 167}
163 168
169/* Atmel chips don't use the same PRI format as AMD chips */
170static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
171{
172 struct map_info *map = mtd->priv;
173 struct cfi_private *cfi = map->fldrv_priv;
174 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
175 struct cfi_pri_atmel atmel_pri;
176
177 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
178 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
179
180 if (atmel_pri.Features & 0x02)
181 extp->EraseSuspend = 2;
182
183 if (atmel_pri.BottomBoot)
184 extp->TopBottom = 2;
185 else
186 extp->TopBottom = 3;
187}
188
164static void fixup_use_secsi(struct mtd_info *mtd, void *param) 189static void fixup_use_secsi(struct mtd_info *mtd, void *param)
165{ 190{
166 /* Setup for chips with a secsi area */ 191 /* Setup for chips with a secsi area */
@@ -179,6 +204,17 @@ static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
179 204
180} 205}
181 206
207/*
208 * Some Atmel chips (e.g. the AT49BV6416) power up with all sectors
209 * locked by default.
210 */
211static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
212{
213 mtd->lock = cfi_atmel_lock;
214 mtd->unlock = cfi_atmel_unlock;
215 mtd->flags |= MTD_STUPID_LOCK;
216}
217
182static struct cfi_fixup cfi_fixup_table[] = { 218static struct cfi_fixup cfi_fixup_table[] = {
183#ifdef AMD_BOOTLOC_BUG 219#ifdef AMD_BOOTLOC_BUG
184 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 220 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
@@ -192,6 +228,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
192#if !FORCE_WORD_WRITE 228#if !FORCE_WORD_WRITE
193 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 229 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
194#endif 230#endif
231 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
195 { 0, 0, NULL, NULL } 232 { 0, 0, NULL, NULL }
196}; 233};
197static struct cfi_fixup jedec_fixup_table[] = { 234static struct cfi_fixup jedec_fixup_table[] = {
@@ -207,6 +244,7 @@ static struct cfi_fixup fixup_table[] = {
207 * we know that is the case. 244 * we know that is the case.
208 */ 245 */
209 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL }, 246 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
247 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
210 { 0, 0, NULL, NULL } 248 { 0, 0, NULL, NULL }
211}; 249};
212 250
@@ -1607,6 +1645,80 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1607 return 0; 1645 return 0;
1608} 1646}
1609 1647
1648static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1649 unsigned long adr, int len, void *thunk)
1650{
1651 struct cfi_private *cfi = map->fldrv_priv;
1652 int ret;
1653
1654 spin_lock(chip->mutex);
1655 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1656 if (ret)
1657 goto out_unlock;
1658 chip->state = FL_LOCKING;
1659
1660 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1661 __func__, adr, len);
1662
1663 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1664 cfi->device_type, NULL);
1665 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1666 cfi->device_type, NULL);
1667 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1668 cfi->device_type, NULL);
1669 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1670 cfi->device_type, NULL);
1671 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1672 cfi->device_type, NULL);
1673 map_write(map, CMD(0x40), chip->start + adr);
1674
1675 chip->state = FL_READY;
1676 put_chip(map, chip, adr + chip->start);
1677 ret = 0;
1678
1679out_unlock:
1680 spin_unlock(chip->mutex);
1681 return ret;
1682}
1683
1684static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1685 unsigned long adr, int len, void *thunk)
1686{
1687 struct cfi_private *cfi = map->fldrv_priv;
1688 int ret;
1689
1690 spin_lock(chip->mutex);
1691 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1692 if (ret)
1693 goto out_unlock;
1694 chip->state = FL_UNLOCKING;
1695
1696 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1697 __func__, adr, len);
1698
1699 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1700 cfi->device_type, NULL);
1701 map_write(map, CMD(0x70), adr);
1702
1703 chip->state = FL_READY;
1704 put_chip(map, chip, adr + chip->start);
1705 ret = 0;
1706
1707out_unlock:
1708 spin_unlock(chip->mutex);
1709 return ret;
1710}
1711
1712static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1713{
1714 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1715}
1716
1717static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1718{
1719 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1720}
1721
1610 1722
1611static void cfi_amdstd_sync (struct mtd_info *mtd) 1723static void cfi_amdstd_sync (struct mtd_info *mtd)
1612{ 1724{
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 8f39d0a31438..1154dac715aa 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -111,6 +111,7 @@
111#define MX29LV040C 0x004F 111#define MX29LV040C 0x004F
112#define MX29LV160T 0x22C4 112#define MX29LV160T 0x22C4
113#define MX29LV160B 0x2249 113#define MX29LV160B 0x2249
114#define MX29F040 0x00A4
114#define MX29F016 0x00AD 115#define MX29F016 0x00AD
115#define MX29F002T 0x00B0 116#define MX29F002T 0x00B0
116#define MX29F004T 0x0045 117#define MX29F004T 0x0045
@@ -1172,6 +1173,19 @@ static const struct amd_flash_info jedec_table[] = {
1172 } 1173 }
1173 }, { 1174 }, {
1174 .mfr_id = MANUFACTURER_MACRONIX, 1175 .mfr_id = MANUFACTURER_MACRONIX,
1176 .dev_id = MX29F040,
1177 .name = "Macronix MX29F040",
1178 .uaddr = {
1179 [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
1180 },
1181 .DevSize = SIZE_512KiB,
1182 .CmdSet = P_ID_AMD_STD,
1183 .NumEraseRegions= 1,
1184 .regions = {
1185 ERASEINFO(0x10000,8),
1186 }
1187 }, {
1188 .mfr_id = MANUFACTURER_MACRONIX,
1175 .dev_id = MX29F016, 1189 .dev_id = MX29F016,
1176 .name = "Macronix MX29F016", 1190 .name = "Macronix MX29F016",
1177 .uaddr = { 1191 .uaddr = {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index ede3561be870..401c6a294baa 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -18,6 +18,7 @@
18#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
19#include <linux/buffer_head.h> 19#include <linux/buffer_head.h>
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/mount.h>
21 22
22#define VERSION "$Revision: 1.30 $" 23#define VERSION "$Revision: 1.30 $"
23 24
@@ -236,6 +237,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
236 } 237 }
237 return 0; 238 return 0;
238} 239}
240
241
239static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len, 242static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
240 size_t *retlen, const u_char *buf) 243 size_t *retlen, const u_char *buf)
241{ 244{
@@ -299,6 +302,19 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
299 302
300 /* Get a handle on the device */ 303 /* Get a handle on the device */
301 bdev = open_bdev_excl(devname, O_RDWR, NULL); 304 bdev = open_bdev_excl(devname, O_RDWR, NULL);
305#ifndef MODULE
306 if (IS_ERR(bdev)) {
307
308 /* We might not have rootfs mounted at this point. Try
309 to resolve the device name by other means. */
310
311 dev_t dev = name_to_dev_t(devname);
312 if (dev != 0) {
313 bdev = open_by_devnum(dev, FMODE_WRITE | FMODE_READ);
314 }
315 }
316#endif
317
302 if (IS_ERR(bdev)) { 318 if (IS_ERR(bdev)) {
303 ERROR("error: cannot open device %s", devname); 319 ERROR("error: cannot open device %s", devname);
304 goto devinit_err; 320 goto devinit_err;
@@ -393,26 +409,6 @@ static int parse_num(size_t *num, const char *token)
393} 409}
394 410
395 411
396static int parse_name(char **pname, const char *token, size_t limit)
397{
398 size_t len;
399 char *name;
400
401 len = strlen(token) + 1;
402 if (len > limit)
403 return -ENOSPC;
404
405 name = kmalloc(len, GFP_KERNEL);
406 if (!name)
407 return -ENOMEM;
408
409 strcpy(name, token);
410
411 *pname = name;
412 return 0;
413}
414
415
416static inline void kill_final_newline(char *str) 412static inline void kill_final_newline(char *str)
417{ 413{
418 char *newline = strrchr(str, '\n'); 414 char *newline = strrchr(str, '\n');
@@ -426,9 +422,15 @@ static inline void kill_final_newline(char *str)
426 return 0; \ 422 return 0; \
427} while (0) 423} while (0)
428 424
429static int block2mtd_setup(const char *val, struct kernel_param *kp) 425#ifndef MODULE
426static int block2mtd_init_called = 0;
427static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
428#endif
429
430
431static int block2mtd_setup2(const char *val)
430{ 432{
431 char buf[80+12]; /* 80 for device, 12 for erase size */ 433 char buf[80 + 12]; /* 80 for device, 12 for erase size */
432 char *str = buf; 434 char *str = buf;
433 char *token[2]; 435 char *token[2];
434 char *name; 436 char *name;
@@ -450,13 +452,9 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
450 if (!token[0]) 452 if (!token[0])
451 parse_err("no argument"); 453 parse_err("no argument");
452 454
453 ret = parse_name(&name, token[0], 80); 455 name = token[0];
454 if (ret == -ENOMEM) 456 if (strlen(name) + 1 > 80)
455 parse_err("out of memory"); 457 parse_err("device name too long");
456 if (ret == -ENOSPC)
457 parse_err("name too long");
458 if (ret)
459 return 0;
460 458
461 if (token[1]) { 459 if (token[1]) {
462 ret = parse_num(&erase_size, token[1]); 460 ret = parse_num(&erase_size, token[1]);
@@ -472,13 +470,48 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
472} 470}
473 471
474 472
473static int block2mtd_setup(const char *val, struct kernel_param *kp)
474{
475#ifdef MODULE
476 return block2mtd_setup2(val);
477#else
478 /* If more parameters are later passed in via
479 /sys/module/block2mtd/parameters/block2mtd
480 and block2mtd_init() has already been called,
481 we can parse the argument now. */
482
483 if (block2mtd_init_called)
484 return block2mtd_setup2(val);
485
486 /* During early boot stage, we only save the parameters
487 here. We must parse them later: if the param was passed
488 from the kernel boot command line, block2mtd_setup() is
489 called so early that it is not possible to resolve
490 the device (even kmalloc() fails). Defer that work to
491 block2mtd_setup2(). */
492
493 strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
494
495 return 0;
496#endif
497}
498
499
475module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200); 500module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
476MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\""); 501MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
477 502
478static int __init block2mtd_init(void) 503static int __init block2mtd_init(void)
479{ 504{
505 int ret = 0;
480 INFO("version " VERSION); 506 INFO("version " VERSION);
481 return 0; 507
508#ifndef MODULE
509 if (strlen(block2mtd_paramline))
510 ret = block2mtd_setup2(block2mtd_paramline);
511 block2mtd_init_called = 1;
512#endif
513
514 return ret;
482} 515}
483 516
484 517
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index a8466141e914..ef4a731ca5c2 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -406,13 +406,13 @@ struct flash_info {
406 406
407static struct flash_info __devinitdata m25p_data [] = { 407static struct flash_info __devinitdata m25p_data [] = {
408 /* REVISIT: fill in JEDEC ids, for parts that have them */ 408 /* REVISIT: fill in JEDEC ids, for parts that have them */
409 { "m25p05", 0x05, 0x0000, 32 * 1024, 2 }, 409 { "m25p05", 0x05, 0x2010, 32 * 1024, 2 },
410 { "m25p10", 0x10, 0x0000, 32 * 1024, 4 }, 410 { "m25p10", 0x10, 0x2011, 32 * 1024, 4 },
411 { "m25p20", 0x11, 0x0000, 64 * 1024, 4 }, 411 { "m25p20", 0x11, 0x2012, 64 * 1024, 4 },
412 { "m25p40", 0x12, 0x0000, 64 * 1024, 8 }, 412 { "m25p40", 0x12, 0x2013, 64 * 1024, 8 },
413 { "m25p80", 0x13, 0x0000, 64 * 1024, 16 }, 413 { "m25p80", 0x13, 0x0000, 64 * 1024, 16 },
414 { "m25p16", 0x14, 0x0000, 64 * 1024, 32 }, 414 { "m25p16", 0x14, 0x2015, 64 * 1024, 32 },
415 { "m25p32", 0x15, 0x0000, 64 * 1024, 64 }, 415 { "m25p32", 0x15, 0x2016, 64 * 1024, 64 },
416 { "m25p64", 0x16, 0x2017, 64 * 1024, 128 }, 416 { "m25p64", 0x16, 0x2017, 64 * 1024, 128 },
417}; 417};
418 418
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 6f9bbf6fee4d..354e1657cc26 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -4,82 +4,82 @@
4 * PMC551 PCI Mezzanine Ram Device 4 * PMC551 PCI Mezzanine Ram Device
5 * 5 *
6 * Author: 6 * Author:
7 * Mark Ferrell <mferrell@mvista.com> 7 * Mark Ferrell <mferrell@mvista.com>
8 * Copyright 1999,2000 Nortel Networks 8 * Copyright 1999,2000 Nortel Networks
9 * 9 *
10 * License: 10 * License:
11 * As part of this driver was derived from the slram.c driver it 11 * As part of this driver was derived from the slram.c driver it
12 * falls under the same license, which is GNU General Public 12 * falls under the same license, which is GNU General Public
13 * License v2 13 * License v2
14 * 14 *
15 * Description: 15 * Description:
16 * This driver is intended to support the PMC551 PCI Ram device 16 * This driver is intended to support the PMC551 PCI Ram device
17 * from Ramix Inc. The PMC551 is a PMC Mezzanine module for 17 * from Ramix Inc. The PMC551 is a PMC Mezzanine module for
18 * cPCI embedded systems. The device contains a single SROM 18 * cPCI embedded systems. The device contains a single SROM
19 * that initially programs the V370PDC chipset onboard the 19 * that initially programs the V370PDC chipset onboard the
20 * device, and various banks of DRAM/SDRAM onboard. This driver 20 * device, and various banks of DRAM/SDRAM onboard. This driver
21 * implements this PCI Ram device as an MTD (Memory Technology 21 * implements this PCI Ram device as an MTD (Memory Technology
22 * Device) so that it can be used to hold a file system, or for 22 * Device) so that it can be used to hold a file system, or for
23 * added swap space in embedded systems. Since the memory on 23 * added swap space in embedded systems. Since the memory on
24 * this board isn't as fast as main memory we do not try to hook 24 * this board isn't as fast as main memory we do not try to hook
25 * it into main memory as that would simply reduce performance 25 * it into main memory as that would simply reduce performance
26 * on the system. Using it as a block device allows us to use 26 * on the system. Using it as a block device allows us to use
27 * it as high speed swap or for a high speed disk device of some 27 * it as high speed swap or for a high speed disk device of some
28 * sort. Which becomes very useful on diskless systems in the 28 * sort. Which becomes very useful on diskless systems in the
29 * embedded market I might add. 29 * embedded market I might add.
30 * 30 *
31 * Notes: 31 * Notes:
32 * Due to what I assume is more buggy SROM, the 64M PMC551 I 32 * Due to what I assume is more buggy SROM, the 64M PMC551 I
33 * have available claims that all 4 of its DRAM banks have 64M 33 * have available claims that all 4 of its DRAM banks have 64M
34 * of ram configured (making a grand total of 256M onboard). 34 * of ram configured (making a grand total of 256M onboard).
35 * This is slightly annoying since the BAR0 size reflects the 35 * This is slightly annoying since the BAR0 size reflects the
36 * aperture size, not the dram size, and the V370PDC supplies no 36 * aperture size, not the dram size, and the V370PDC supplies no
37 * other method for memory size discovery. This problem is 37 * other method for memory size discovery. This problem is
38 * mostly only relevant when compiled as a module, as the 38 * mostly only relevant when compiled as a module, as the
39 * unloading of the module with an aperture size smaller than 39 * unloading of the module with an aperture size smaller than
40 * the ram will cause the driver to detect the onboard memory 40 * the ram will cause the driver to detect the onboard memory
41 * size to be equal to the aperture size when the module is 41 * size to be equal to the aperture size when the module is
42 * reloaded. Soooo, to help, the module supports an msize 42 * reloaded. Soooo, to help, the module supports an msize
43 * option to allow the specification of the onboard memory, and 43 * option to allow the specification of the onboard memory, and
44 * an asize option, to allow the specification of the aperture 44 * an asize option, to allow the specification of the aperture
45 * size. The aperture must be equal to or less than the memory 45 * size. The aperture must be equal to or less than the memory
46 * size, the driver will correct this if you screw it up. This 46 * size, the driver will correct this if you screw it up. This
47 * problem is not relevant for compiled in drivers as compiled 47 * problem is not relevant for compiled in drivers as compiled
48 * in drivers only init once. 48 * in drivers only init once.
49 * 49 *
50 * Credits: 50 * Credits:
51 * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the 51 * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the
52 * initial example code of how to initialize this device and for 52 * initial example code of how to initialize this device and for
53 * help with questions I had concerning operation of the device. 53 * help with questions I had concerning operation of the device.
54 * 54 *
55 * Most of the MTD code for this driver was originally written 55 * Most of the MTD code for this driver was originally written
56 * for the slram.o module in the MTD drivers package which 56 * for the slram.o module in the MTD drivers package which
57 * allows the mapping of system memory into an MTD device. 57 * allows the mapping of system memory into an MTD device.
58 * Since the PMC551 memory module is accessed in the same 58 * Since the PMC551 memory module is accessed in the same
59 * fashion as system memory, the slram.c code became a very nice 59 * fashion as system memory, the slram.c code became a very nice
60 * fit to the needs of this driver. All we added was PCI 60 * fit to the needs of this driver. All we added was PCI
61 * detection/initialization to the driver and automatically figure 61 * detection/initialization to the driver and automatically figure
62 * out the size via the PCI detection.o, later changes by Corey 62 * out the size via the PCI detection.o, later changes by Corey
63 * Minyard set up the card to utilize a 1M sliding aperture. 63 * Minyard set up the card to utilize a 1M sliding aperture.
64 * 64 *
65 * Corey Minyard <minyard@nortelnetworks.com> 65 * Corey Minyard <minyard@nortelnetworks.com>
66 * * Modified driver to utilize a sliding aperture instead of 66 * * Modified driver to utilize a sliding aperture instead of
67 * mapping all memory into kernel space which turned out to 67 * mapping all memory into kernel space which turned out to
68 * be very wasteful. 68 * be very wasteful.
69 * * Located a bug in the SROM's initialization sequence that 69 * * Located a bug in the SROM's initialization sequence that
70 * made the memory unusable, added a fix to code to touch up 70 * made the memory unusable, added a fix to code to touch up
71 * the DRAM some. 71 * the DRAM some.
72 * 72 *
73 * Bugs/FIXME's: 73 * Bugs/FIXME's:
74 * * MUST fix the init function to not spin on a register 74 * * MUST fix the init function to not spin on a register
75 * waiting for it to set .. this does not safely handle busted 75 * waiting for it to set .. this does not safely handle busted
76 * devices that never reset the register correctly which will 76 * devices that never reset the register correctly which will
77 * cause the system to hang w/ a reboot being the only chance at 77 * cause the system to hang w/ a reboot being the only chance at
78 * recovery. [sort of fixed, could be better] 78 * recovery. [sort of fixed, could be better]
79 * * Add I2C handling of the SROM so we can read the SROM's information 79 * * Add I2C handling of the SROM so we can read the SROM's information
80 * about the aperture size. This should always accurately reflect the 80 * about the aperture size. This should always accurately reflect the
81 * onboard memory size. 81 * onboard memory size.
82 * * Comb the init routine. It's still a bit kludgy on a few things. 82 * * Comb the init routine. It's still a bit kludgy on a few things.
83 */ 83 */
84 84
85#include <linux/kernel.h> 85#include <linux/kernel.h>
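The Notes block above describes the msize/asize module options and the power-of-two aperture they feed. As a hedged sketch (not part of this patch; size_mb, size_bytes, aperture_bytes and base_map0 are placeholder names mirroring init_pmc551() further down), the arithmetic is roughly:

    /* Sketch only: mirrors the size handling in init_pmc551() below.
     * A MiB count from the msize/asize parameters is forced to a power
     * of two by keeping its lowest set bit, and the aperture exponent
     * is packed into the MEM_MAP0 register. */
    size_bytes = (1 << (ffs(size_mb) - 1)) << 20;       /* lowest set bit, in bytes */
    base_map0  = PMC551_PCI_MEM_MAP_REG_EN
               | PMC551_PCI_MEM_MAP_ENABLE
               | (ffs(aperture_bytes >> 20) - 1) << 4;  /* log2 of aperture size in MiB */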
@@ -99,84 +99,83 @@
99#include <asm/system.h> 99#include <asm/system.h>
100#include <linux/pci.h> 100#include <linux/pci.h>
101 101
102#ifndef CONFIG_PCI
103#error Enable PCI in your kernel config
104#endif
105
106#include <linux/mtd/mtd.h> 102#include <linux/mtd/mtd.h>
107#include <linux/mtd/pmc551.h> 103#include <linux/mtd/pmc551.h>
108#include <linux/mtd/compatmac.h> 104#include <linux/mtd/compatmac.h>
109 105
110static struct mtd_info *pmc551list; 106static struct mtd_info *pmc551list;
111 107
112static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr) 108static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
113{ 109{
114 struct mypriv *priv = mtd->priv; 110 struct mypriv *priv = mtd->priv;
115 u32 soff_hi, soff_lo; /* start address offset hi/lo */ 111 u32 soff_hi, soff_lo; /* start address offset hi/lo */
116 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ 112 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
117 unsigned long end; 113 unsigned long end;
118 u_char *ptr; 114 u_char *ptr;
119 size_t retlen; 115 size_t retlen;
120 116
121#ifdef CONFIG_MTD_PMC551_DEBUG 117#ifdef CONFIG_MTD_PMC551_DEBUG
122 printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len); 118 printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr,
119 (long)instr->len);
123#endif 120#endif
124 121
125 end = instr->addr + instr->len - 1; 122 end = instr->addr + instr->len - 1;
126 123
127 /* Is it past the end? */ 124 /* Is it past the end? */
128 if ( end > mtd->size ) { 125 if (end > mtd->size) {
129#ifdef CONFIG_MTD_PMC551_DEBUG 126#ifdef CONFIG_MTD_PMC551_DEBUG
130 printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size); 127 printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
128 (long)end, (long)mtd->size);
131#endif 129#endif
132 return -EINVAL; 130 return -EINVAL;
133 } 131 }
134 132
135 eoff_hi = end & ~(priv->asize - 1); 133 eoff_hi = end & ~(priv->asize - 1);
136 soff_hi = instr->addr & ~(priv->asize - 1); 134 soff_hi = instr->addr & ~(priv->asize - 1);
137 eoff_lo = end & (priv->asize - 1); 135 eoff_lo = end & (priv->asize - 1);
138 soff_lo = instr->addr & (priv->asize - 1); 136 soff_lo = instr->addr & (priv->asize - 1);
139 137
140 pmc551_point (mtd, instr->addr, instr->len, &retlen, &ptr); 138 pmc551_point(mtd, instr->addr, instr->len, &retlen, &ptr);
141 139
142 if ( soff_hi == eoff_hi || mtd->size == priv->asize) { 140 if (soff_hi == eoff_hi || mtd->size == priv->asize) {
143 /* The whole thing fits within one access, so just one shot 141 /* The whole thing fits within one access, so just one shot
144 will do it. */ 142 will do it. */
145 memset(ptr, 0xff, instr->len); 143 memset(ptr, 0xff, instr->len);
146 } else { 144 } else {
147 /* We have to do multiple writes to get all the data 145 /* We have to do multiple writes to get all the data
148 written. */ 146 written. */
149 while (soff_hi != eoff_hi) { 147 while (soff_hi != eoff_hi) {
150#ifdef CONFIG_MTD_PMC551_DEBUG 148#ifdef CONFIG_MTD_PMC551_DEBUG
151 printk( KERN_DEBUG "pmc551_erase() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); 149 printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, "
150 "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
152#endif 151#endif
153 memset(ptr, 0xff, priv->asize); 152 memset(ptr, 0xff, priv->asize);
154 if (soff_hi + priv->asize >= mtd->size) { 153 if (soff_hi + priv->asize >= mtd->size) {
155 goto out; 154 goto out;
156 } 155 }
157 soff_hi += priv->asize; 156 soff_hi += priv->asize;
158 pmc551_point (mtd,(priv->base_map0|soff_hi), 157 pmc551_point(mtd, (priv->base_map0 | soff_hi),
159 priv->asize, &retlen, &ptr); 158 priv->asize, &retlen, &ptr);
160 } 159 }
161 memset (ptr, 0xff, eoff_lo); 160 memset(ptr, 0xff, eoff_lo);
162 } 161 }
163 162
164out: 163 out:
165 instr->state = MTD_ERASE_DONE; 164 instr->state = MTD_ERASE_DONE;
166#ifdef CONFIG_MTD_PMC551_DEBUG 165#ifdef CONFIG_MTD_PMC551_DEBUG
167 printk(KERN_DEBUG "pmc551_erase() done\n"); 166 printk(KERN_DEBUG "pmc551_erase() done\n");
168#endif 167#endif
169 168
170 mtd_erase_callback(instr); 169 mtd_erase_callback(instr);
171 return 0; 170 return 0;
172} 171}
173 172
174 173static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
175static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf) 174 size_t * retlen, u_char ** mtdbuf)
176{ 175{
177 struct mypriv *priv = mtd->priv; 176 struct mypriv *priv = mtd->priv;
178 u32 soff_hi; 177 u32 soff_hi;
179 u32 soff_lo; 178 u32 soff_lo;
180 179
181#ifdef CONFIG_MTD_PMC551_DEBUG 180#ifdef CONFIG_MTD_PMC551_DEBUG
182 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); 181 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
@@ -184,18 +183,19 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *
184 183
185 if (from + len > mtd->size) { 184 if (from + len > mtd->size) {
186#ifdef CONFIG_MTD_PMC551_DEBUG 185#ifdef CONFIG_MTD_PMC551_DEBUG
187 printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", (long)from+len, (long)mtd->size); 186 printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
187 (long)from + len, (long)mtd->size);
188#endif 188#endif
189 return -EINVAL; 189 return -EINVAL;
190 } 190 }
191 191
192 soff_hi = from & ~(priv->asize - 1); 192 soff_hi = from & ~(priv->asize - 1);
193 soff_lo = from & (priv->asize - 1); 193 soff_lo = from & (priv->asize - 1);
194 194
195 /* Cheap hack optimization */ 195 /* Cheap hack optimization */
196 if( priv->curr_map0 != from ) { 196 if (priv->curr_map0 != from) {
197 pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0, 197 pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
198 (priv->base_map0 | soff_hi) ); 198 (priv->base_map0 | soff_hi));
199 priv->curr_map0 = soff_hi; 199 priv->curr_map0 = soff_hi;
200 } 200 }
201 201
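The "Cheap hack optimization" above only reprograms MEM_MAP0 when the requested offset falls outside the currently mapped window. A sketch of the split it relies on (illustrative only; pmc551_split() is not a driver symbol), assuming asize is a power of two as the driver enforces with ffs():

    /* Illustrative only: how an absolute offset decomposes into the window
     * base written to PMC551_PCI_MEM_MAP0 and the offset inside the window. */
    static void pmc551_split(u32 from, u32 asize, u32 *win, u32 *off)
    {
            *win = from & ~(asize - 1);   /* aperture-aligned window base   */
            *off = from &  (asize - 1);   /* byte offset within the window  */
    }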
@@ -204,137 +204,144 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *
204 return 0; 204 return 0;
205} 205}
206 206
207 207static void pmc551_unpoint(struct mtd_info *mtd, u_char * addr, loff_t from,
208static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) 208 size_t len)
209{ 209{
210#ifdef CONFIG_MTD_PMC551_DEBUG 210#ifdef CONFIG_MTD_PMC551_DEBUG
211 printk(KERN_DEBUG "pmc551_unpoint()\n"); 211 printk(KERN_DEBUG "pmc551_unpoint()\n");
212#endif 212#endif
213} 213}
214 214
215 215static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
216static int pmc551_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 216 size_t * retlen, u_char * buf)
217{ 217{
218 struct mypriv *priv = mtd->priv; 218 struct mypriv *priv = mtd->priv;
219 u32 soff_hi, soff_lo; /* start address offset hi/lo */ 219 u32 soff_hi, soff_lo; /* start address offset hi/lo */
220 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ 220 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
221 unsigned long end; 221 unsigned long end;
222 u_char *ptr; 222 u_char *ptr;
223 u_char *copyto = buf; 223 u_char *copyto = buf;
224 224
225#ifdef CONFIG_MTD_PMC551_DEBUG 225#ifdef CONFIG_MTD_PMC551_DEBUG
226 printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", (long)from, (long)len, (long)priv->asize); 226 printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n",
227 (long)from, (long)len, (long)priv->asize);
227#endif 228#endif
228 229
229 end = from + len - 1; 230 end = from + len - 1;
230 231
231 /* Is it past the end? */ 232 /* Is it past the end? */
232 if (end > mtd->size) { 233 if (end > mtd->size) {
233#ifdef CONFIG_MTD_PMC551_DEBUG 234#ifdef CONFIG_MTD_PMC551_DEBUG
234 printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", (long) end, (long)mtd->size); 235 printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
236 (long)end, (long)mtd->size);
235#endif 237#endif
236 return -EINVAL; 238 return -EINVAL;
237 } 239 }
238 240
239 soff_hi = from & ~(priv->asize - 1); 241 soff_hi = from & ~(priv->asize - 1);
240 eoff_hi = end & ~(priv->asize - 1); 242 eoff_hi = end & ~(priv->asize - 1);
241 soff_lo = from & (priv->asize - 1); 243 soff_lo = from & (priv->asize - 1);
242 eoff_lo = end & (priv->asize - 1); 244 eoff_lo = end & (priv->asize - 1);
243 245
244 pmc551_point (mtd, from, len, retlen, &ptr); 246 pmc551_point(mtd, from, len, retlen, &ptr);
245 247
246 if (soff_hi == eoff_hi) { 248 if (soff_hi == eoff_hi) {
247 /* The whole thing fits within one access, so just one shot 249 /* The whole thing fits within one access, so just one shot
248 will do it. */ 250 will do it. */
249 memcpy(copyto, ptr, len); 251 memcpy(copyto, ptr, len);
250 copyto += len; 252 copyto += len;
251 } else { 253 } else {
252 /* We have to do multiple writes to get all the data 254 /* We have to do multiple writes to get all the data
253 written. */ 255 written. */
254 while (soff_hi != eoff_hi) { 256 while (soff_hi != eoff_hi) {
255#ifdef CONFIG_MTD_PMC551_DEBUG 257#ifdef CONFIG_MTD_PMC551_DEBUG
256 printk( KERN_DEBUG "pmc551_read() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); 258 printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, "
259 "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
257#endif 260#endif
258 memcpy(copyto, ptr, priv->asize); 261 memcpy(copyto, ptr, priv->asize);
259 copyto += priv->asize; 262 copyto += priv->asize;
260 if (soff_hi + priv->asize >= mtd->size) { 263 if (soff_hi + priv->asize >= mtd->size) {
261 goto out; 264 goto out;
262 } 265 }
263 soff_hi += priv->asize; 266 soff_hi += priv->asize;
264 pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr); 267 pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr);
265 } 268 }
266 memcpy(copyto, ptr, eoff_lo); 269 memcpy(copyto, ptr, eoff_lo);
267 copyto += eoff_lo; 270 copyto += eoff_lo;
268 } 271 }
269 272
270out: 273 out:
271#ifdef CONFIG_MTD_PMC551_DEBUG 274#ifdef CONFIG_MTD_PMC551_DEBUG
272 printk(KERN_DEBUG "pmc551_read() done\n"); 275 printk(KERN_DEBUG "pmc551_read() done\n");
273#endif 276#endif
274 *retlen = copyto - buf; 277 *retlen = copyto - buf;
275 return 0; 278 return 0;
276} 279}
277 280
278static int pmc551_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 281static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
282 size_t * retlen, const u_char * buf)
279{ 283{
280 struct mypriv *priv = mtd->priv; 284 struct mypriv *priv = mtd->priv;
281 u32 soff_hi, soff_lo; /* start address offset hi/lo */ 285 u32 soff_hi, soff_lo; /* start address offset hi/lo */
282 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ 286 u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
283 unsigned long end; 287 unsigned long end;
284 u_char *ptr; 288 u_char *ptr;
285 const u_char *copyfrom = buf; 289 const u_char *copyfrom = buf;
286
287 290
288#ifdef CONFIG_MTD_PMC551_DEBUG 291#ifdef CONFIG_MTD_PMC551_DEBUG
289 printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", (long)to, (long)len, (long)priv->asize); 292 printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n",
293 (long)to, (long)len, (long)priv->asize);
290#endif 294#endif
291 295
292 end = to + len - 1; 296 end = to + len - 1;
293 /* Is it past the end? or did the u32 wrap? */ 297 /* Is it past the end? or did the u32 wrap? */
294 if (end > mtd->size ) { 298 if (end > mtd->size) {
295#ifdef CONFIG_MTD_PMC551_DEBUG 299#ifdef CONFIG_MTD_PMC551_DEBUG
296 printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, size: %ld, to: %ld)\n", (long) end, (long)mtd->size, (long)to); 300 printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
301 "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
302 (long)to);
297#endif 303#endif
298 return -EINVAL; 304 return -EINVAL;
299 } 305 }
300 306
301 soff_hi = to & ~(priv->asize - 1); 307 soff_hi = to & ~(priv->asize - 1);
302 eoff_hi = end & ~(priv->asize - 1); 308 eoff_hi = end & ~(priv->asize - 1);
303 soff_lo = to & (priv->asize - 1); 309 soff_lo = to & (priv->asize - 1);
304 eoff_lo = end & (priv->asize - 1); 310 eoff_lo = end & (priv->asize - 1);
305 311
306 pmc551_point (mtd, to, len, retlen, &ptr); 312 pmc551_point(mtd, to, len, retlen, &ptr);
307 313
308 if (soff_hi == eoff_hi) { 314 if (soff_hi == eoff_hi) {
309 /* The whole thing fits within one access, so just one shot 315 /* The whole thing fits within one access, so just one shot
310 will do it. */ 316 will do it. */
311 memcpy(ptr, copyfrom, len); 317 memcpy(ptr, copyfrom, len);
312 copyfrom += len; 318 copyfrom += len;
313 } else { 319 } else {
314 /* We have to do multiple writes to get all the data 320 /* We have to do multiple writes to get all the data
315 written. */ 321 written. */
316 while (soff_hi != eoff_hi) { 322 while (soff_hi != eoff_hi) {
317#ifdef CONFIG_MTD_PMC551_DEBUG 323#ifdef CONFIG_MTD_PMC551_DEBUG
318 printk( KERN_DEBUG "pmc551_write() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); 324 printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, "
325 "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
319#endif 326#endif
320 memcpy(ptr, copyfrom, priv->asize); 327 memcpy(ptr, copyfrom, priv->asize);
321 copyfrom += priv->asize; 328 copyfrom += priv->asize;
322 if (soff_hi >= mtd->size) { 329 if (soff_hi >= mtd->size) {
323 goto out; 330 goto out;
324 } 331 }
325 soff_hi += priv->asize; 332 soff_hi += priv->asize;
326 pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr); 333 pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr);
327 } 334 }
328 memcpy(ptr, copyfrom, eoff_lo); 335 memcpy(ptr, copyfrom, eoff_lo);
329 copyfrom += eoff_lo; 336 copyfrom += eoff_lo;
330 } 337 }
331 338
332out: 339 out:
333#ifdef CONFIG_MTD_PMC551_DEBUG 340#ifdef CONFIG_MTD_PMC551_DEBUG
334 printk(KERN_DEBUG "pmc551_write() done\n"); 341 printk(KERN_DEBUG "pmc551_write() done\n");
335#endif 342#endif
336 *retlen = copyfrom - buf; 343 *retlen = copyfrom - buf;
337 return 0; 344 return 0;
338} 345}
339 346
340/* 347/*
@@ -349,58 +356,58 @@ out:
349 * mechanism 356 * mechanism
350 * returns the size of the memory region found. 357 * returns the size of the memory region found.
351 */ 358 */
352static u32 fixup_pmc551 (struct pci_dev *dev) 359static u32 fixup_pmc551(struct pci_dev *dev)
353{ 360{
354#ifdef CONFIG_MTD_PMC551_BUGFIX 361#ifdef CONFIG_MTD_PMC551_BUGFIX
355 u32 dram_data; 362 u32 dram_data;
356#endif 363#endif
357 u32 size, dcmd, cfg, dtmp; 364 u32 size, dcmd, cfg, dtmp;
358 u16 cmd, tmp, i; 365 u16 cmd, tmp, i;
359 u8 bcmd, counter; 366 u8 bcmd, counter;
360 367
361 /* Sanity Check */ 368 /* Sanity Check */
362 if(!dev) { 369 if (!dev) {
363 return -ENODEV; 370 return -ENODEV;
364 } 371 }
365 372
366 /* 373 /*
367 * Attempt to reset the card 374 * Attempt to reset the card
368 * FIXME: Stop Spinning registers 375 * FIXME: Stop Spinning registers
369 */ 376 */
370 counter=0; 377 counter = 0;
371 /* unlock registers */ 378 /* unlock registers */
372 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5 ); 379 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5);
373 /* read in old data */ 380 /* read in old data */
374 pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd ); 381 pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
375 /* bang the reset line up and down for a few */ 382 /* bang the reset line up and down for a few */
376 for(i=0;i<10;i++) { 383 for (i = 0; i < 10; i++) {
377 counter=0; 384 counter = 0;
378 bcmd &= ~0x80; 385 bcmd &= ~0x80;
379 while(counter++ < 100) { 386 while (counter++ < 100) {
380 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); 387 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
381 } 388 }
382 counter=0; 389 counter = 0;
383 bcmd |= 0x80; 390 bcmd |= 0x80;
384 while(counter++ < 100) { 391 while (counter++ < 100) {
385 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); 392 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
386 } 393 }
387 } 394 }
388 bcmd |= (0x40|0x20); 395 bcmd |= (0x40 | 0x20);
389 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); 396 pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
390 397
391 /* 398 /*
392 * Take care and turn off the memory on the device while we 399 * Take care and turn off the memory on the device while we
393 * tweak the configurations 400 * tweak the configurations
394 */ 401 */
395 pci_read_config_word(dev, PCI_COMMAND, &cmd); 402 pci_read_config_word(dev, PCI_COMMAND, &cmd);
396 tmp = cmd & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY); 403 tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
397 pci_write_config_word(dev, PCI_COMMAND, tmp); 404 pci_write_config_word(dev, PCI_COMMAND, tmp);
398 405
399 /* 406 /*
400 * Disable existing aperture before probing memory size 407 * Disable existing aperture before probing memory size
401 */ 408 */
402 pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd); 409 pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd);
403 dtmp=(dcmd|PMC551_PCI_MEM_MAP_ENABLE|PMC551_PCI_MEM_MAP_REG_EN); 410 dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN);
404 pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp); 411 pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp);
405 /* 412 /*
406 * Grab old BAR0 config so that we can figure out memory size 413 * Grab old BAR0 config so that we can figure out memory size
@@ -411,220 +418,230 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
411 * then write all 1's to the memory space, read back the result into 418 * then write all 1's to the memory space, read back the result into
412 * "size", and then write back all the old config. 419 * "size", and then write back all the old config.
413 */ 420 */
414 pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &cfg ); 421 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);
415#ifndef CONFIG_MTD_PMC551_BUGFIX 422#ifndef CONFIG_MTD_PMC551_BUGFIX
416 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, ~0 ); 423 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
417 pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &size ); 424 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
418 size = (size&PCI_BASE_ADDRESS_MEM_MASK); 425 size = (size & PCI_BASE_ADDRESS_MEM_MASK);
419 size &= ~(size-1); 426 size &= ~(size - 1);
420 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg ); 427 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);
421#else 428#else
422 /* 429 /*
423 * Get the size of the memory by reading all the DRAM size values 430 * Get the size of the memory by reading all the DRAM size values
424 * and adding them up. 431 * and adding them up.
425 * 432 *
426 * KLUDGE ALERT: the boards we are using have invalid column and 433 * KLUDGE ALERT: the boards we are using have invalid column and
427 * row mux values. We fix them here, but this will break other 434 * row mux values. We fix them here, but this will break other
428 * memory configurations. 435 * memory configurations.
429 */ 436 */
430 pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data); 437 pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data);
431 size = PMC551_DRAM_BLK_GET_SIZE(dram_data); 438 size = PMC551_DRAM_BLK_GET_SIZE(dram_data);
432 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); 439 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
433 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); 440 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
434 pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data); 441 pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data);
435 442
436 pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data); 443 pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data);
437 size += PMC551_DRAM_BLK_GET_SIZE(dram_data); 444 size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
438 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); 445 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
439 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); 446 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
440 pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data); 447 pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data);
441 448
442 pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data); 449 pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data);
443 size += PMC551_DRAM_BLK_GET_SIZE(dram_data); 450 size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
444 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); 451 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
445 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); 452 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
446 pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data); 453 pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data);
447 454
448 pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data); 455 pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data);
449 size += PMC551_DRAM_BLK_GET_SIZE(dram_data); 456 size += PMC551_DRAM_BLK_GET_SIZE(dram_data);
450 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); 457 dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5);
451 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); 458 dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9);
452 pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data); 459 pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data);
453 460
454 /* 461 /*
455 * Oops .. something went wrong 462 * Oops .. something went wrong
456 */ 463 */
457 if( (size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) { 464 if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) {
458 return -ENODEV; 465 return -ENODEV;
459 } 466 }
460#endif /* CONFIG_MTD_PMC551_BUGFIX */ 467#endif /* CONFIG_MTD_PMC551_BUGFIX */
461 468
462 if ((cfg&PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) { 469 if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) {
463 return -ENODEV; 470 return -ENODEV;
464 } 471 }
465 472
466 /* 473 /*
467 * Precharge Dram 474 * Precharge Dram
468 */ 475 */
469 pci_write_config_word( dev, PMC551_SDRAM_MA, 0x0400 ); 476 pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400);
470 pci_write_config_word( dev, PMC551_SDRAM_CMD, 0x00bf ); 477 pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf);
471 478
472 /* 479 /*
473 * Wait until command has gone through 480 * Wait until command has gone through
474 * FIXME: register spinning issue 481 * FIXME: register spinning issue
475 */ 482 */
476 do { pci_read_config_word( dev, PMC551_SDRAM_CMD, &cmd ); 483 do {
477 if(counter++ > 100)break; 484 pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
478 } while ( (PCI_COMMAND_IO) & cmd ); 485 if (counter++ > 100)
479 486 break;
480 /* 487 } while ((PCI_COMMAND_IO) & cmd);
488
489 /*
481 * Turn on auto refresh 490 * Turn on auto refresh
482 * The loop is taken directly from Ramix's example code. I assume that 491 * The loop is taken directly from Ramix's example code. I assume that
483 * this must be held high for some duration of time, but I can find no 492 * this must be held high for some duration of time, but I can find no
484 * documentation referencing the reasons why. 493 * documentation referencing the reasons why.
485 */ 494 */
486 for ( i = 1; i<=8 ; i++) { 495 for (i = 1; i <= 8; i++) {
487 pci_write_config_word (dev, PMC551_SDRAM_CMD, 0x0df); 496 pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df);
488 497
489 /* 498 /*
490 * Make certain command has gone through 499 * Make certain command has gone through
491 * FIXME: register spinning issue 500 * FIXME: register spinning issue
492 */ 501 */
493 counter=0; 502 counter = 0;
494 do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); 503 do {
495 if(counter++ > 100)break; 504 pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
496 } while ( (PCI_COMMAND_IO) & cmd ); 505 if (counter++ > 100)
497 } 506 break;
498 507 } while ((PCI_COMMAND_IO) & cmd);
499 pci_write_config_word ( dev, PMC551_SDRAM_MA, 0x0020); 508 }
500 pci_write_config_word ( dev, PMC551_SDRAM_CMD, 0x0ff); 509
501 510 pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020);
502 /* 511 pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff);
503 * Wait until command completes 512
504 * FIXME: register spinning issue 513 /*
505 */ 514 * Wait until command completes
506 counter=0; 515 * FIXME: register spinning issue
507 do { pci_read_config_word ( dev, PMC551_SDRAM_CMD, &cmd); 516 */
508 if(counter++ > 100)break; 517 counter = 0;
509 } while ( (PCI_COMMAND_IO) & cmd ); 518 do {
510 519 pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd);
511 pci_read_config_dword ( dev, PMC551_DRAM_CFG, &dcmd); 520 if (counter++ > 100)
512 dcmd |= 0x02000000; 521 break;
513 pci_write_config_dword ( dev, PMC551_DRAM_CFG, dcmd); 522 } while ((PCI_COMMAND_IO) & cmd);
514 523
515 /* 524 pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd);
516 * Check to make certain fast back-to-back, if not 525 dcmd |= 0x02000000;
517 * then set it so 526 pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd);
518 */ 527
519 pci_read_config_word( dev, PCI_STATUS, &cmd); 528 /*
520 if((cmd&PCI_COMMAND_FAST_BACK) == 0) { 529 * Check to make certain fast back-to-back, if not
521 cmd |= PCI_COMMAND_FAST_BACK; 530 * then set it so
522 pci_write_config_word( dev, PCI_STATUS, cmd); 531 */
523 } 532 pci_read_config_word(dev, PCI_STATUS, &cmd);
524 533 if ((cmd & PCI_COMMAND_FAST_BACK) == 0) {
525 /* 534 cmd |= PCI_COMMAND_FAST_BACK;
526 * Check to make certain the DEVSEL is set correctly, this device 535 pci_write_config_word(dev, PCI_STATUS, cmd);
527 * has a tendency to assert DEVSEL and TRDY when a write is performed 536 }
528 * to the memory when memory is read-only 537
529 */ 538 /*
530 if((cmd&PCI_STATUS_DEVSEL_MASK) != 0x0) { 539 * Check to make certain the DEVSEL is set correctly, this device
531 cmd &= ~PCI_STATUS_DEVSEL_MASK; 540 * has a tendency to assert DEVSEL and TRDY when a write is performed
532 pci_write_config_word( dev, PCI_STATUS, cmd ); 541 * to the memory when memory is read-only
533 } 542 */
534 /* 543 if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) {
535 * Set to be prefetchable and put everything back based on old cfg. 544 cmd &= ~PCI_STATUS_DEVSEL_MASK;
545 pci_write_config_word(dev, PCI_STATUS, cmd);
546 }
547 /*
548 * Set to be prefetchable and put everything back based on old cfg.
536 * it's possible that the reset of the V370PDC nuked the original 549 * it's possible that the reset of the V370PDC nuked the original
537 * setup 550 * setup
538 */ 551 */
552 /*
553 cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
554 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
555 */
556
539 /* 557 /*
540 cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH; 558 * Turn PCI memory and I/O bus access back on
541 pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg ); 559 */
542 */ 560 pci_write_config_word(dev, PCI_COMMAND,
543 561 PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
544 /*
545 * Turn PCI memory and I/O bus access back on
546 */
547 pci_write_config_word( dev, PCI_COMMAND,
548 PCI_COMMAND_MEMORY | PCI_COMMAND_IO );
549#ifdef CONFIG_MTD_PMC551_DEBUG 562#ifdef CONFIG_MTD_PMC551_DEBUG
550 /* 563 /*
551 * Some screen fun 564 * Some screen fun
552 */ 565 */
553 printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%llx\n", 566 printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at "
554 (size<1024)?size:(size<1048576)?size>>10:size>>20, 567 "0x%llx\n", (size < 1024) ? size : (size < 1048576) ?
555 (size<1024)?'B':(size<1048576)?'K':'M', 568 size >> 10 : size >> 20,
556 size, ((dcmd&(0x1<<3)) == 0)?"non-":"", 569 (size < 1024) ? 'B' : (size < 1048576) ? 'K' : 'M', size,
557 (unsigned long long)((dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK)); 570 ((dcmd & (0x1 << 3)) == 0) ? "non-" : "",
558 571 (unsigned long long)pci_resource_start(dev, 0));
559 /* 572
560 * Check to see the state of the memory 573 /*
561 */ 574 * Check to see the state of the memory
562 pci_read_config_dword( dev, PMC551_DRAM_BLK0, &dcmd ); 575 */
563 printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n" 576 pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd);
564 "pmc551: DRAM_BLK0 Size: %d at %d\n" 577 printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n"
565 "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n", 578 "pmc551: DRAM_BLK0 Size: %d at %d\n"
566 (((0x1<<1)&dcmd) == 0)?"RW":"RO", 579 "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n",
567 (((0x1<<0)&dcmd) == 0)?"Off":"On", 580 (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
568 PMC551_DRAM_BLK_GET_SIZE(dcmd), 581 (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
569 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); 582 PMC551_DRAM_BLK_GET_SIZE(dcmd),
570 583 ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
571 pci_read_config_dword( dev, PMC551_DRAM_BLK1, &dcmd ); 584 ((dcmd >> 9) & 0xF));
572 printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n" 585
573 "pmc551: DRAM_BLK1 Size: %d at %d\n" 586 pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd);
574 "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n", 587 printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n"
575 (((0x1<<1)&dcmd) == 0)?"RW":"RO", 588 "pmc551: DRAM_BLK1 Size: %d at %d\n"
576 (((0x1<<0)&dcmd) == 0)?"Off":"On", 589 "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n",
577 PMC551_DRAM_BLK_GET_SIZE(dcmd), 590 (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
578 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); 591 (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
579 592 PMC551_DRAM_BLK_GET_SIZE(dcmd),
580 pci_read_config_dword( dev, PMC551_DRAM_BLK2, &dcmd ); 593 ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
581 printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n" 594 ((dcmd >> 9) & 0xF));
582 "pmc551: DRAM_BLK2 Size: %d at %d\n" 595
583 "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n", 596 pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd);
584 (((0x1<<1)&dcmd) == 0)?"RW":"RO", 597 printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n"
585 (((0x1<<0)&dcmd) == 0)?"Off":"On", 598 "pmc551: DRAM_BLK2 Size: %d at %d\n"
586 PMC551_DRAM_BLK_GET_SIZE(dcmd), 599 "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n",
587 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); 600 (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
588 601 (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
589 pci_read_config_dword( dev, PMC551_DRAM_BLK3, &dcmd ); 602 PMC551_DRAM_BLK_GET_SIZE(dcmd),
590 printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n" 603 ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
591 "pmc551: DRAM_BLK3 Size: %d at %d\n" 604 ((dcmd >> 9) & 0xF));
592 "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n", 605
593 (((0x1<<1)&dcmd) == 0)?"RW":"RO", 606 pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd);
594 (((0x1<<0)&dcmd) == 0)?"Off":"On", 607 printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n"
595 PMC551_DRAM_BLK_GET_SIZE(dcmd), 608 "pmc551: DRAM_BLK3 Size: %d at %d\n"
596 ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); 609 "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n",
597 610 (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO",
598 pci_read_config_word( dev, PCI_COMMAND, &cmd ); 611 (((0x1 << 0) & dcmd) == 0) ? "Off" : "On",
599 printk( KERN_DEBUG "pmc551: Memory Access %s\n", 612 PMC551_DRAM_BLK_GET_SIZE(dcmd),
600 (((0x1<<1)&cmd) == 0)?"off":"on" ); 613 ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7),
601 printk( KERN_DEBUG "pmc551: I/O Access %s\n", 614 ((dcmd >> 9) & 0xF));
602 (((0x1<<0)&cmd) == 0)?"off":"on" ); 615
603 616 pci_read_config_word(dev, PCI_COMMAND, &cmd);
604 pci_read_config_word( dev, PCI_STATUS, &cmd ); 617 printk(KERN_DEBUG "pmc551: Memory Access %s\n",
605 printk( KERN_DEBUG "pmc551: Devsel %s\n", 618 (((0x1 << 1) & cmd) == 0) ? "off" : "on");
606 ((PCI_STATUS_DEVSEL_MASK&cmd)==0x000)?"Fast": 619 printk(KERN_DEBUG "pmc551: I/O Access %s\n",
607 ((PCI_STATUS_DEVSEL_MASK&cmd)==0x200)?"Medium": 620 (((0x1 << 0) & cmd) == 0) ? "off" : "on");
608 ((PCI_STATUS_DEVSEL_MASK&cmd)==0x400)?"Slow":"Invalid" ); 621
609 622 pci_read_config_word(dev, PCI_STATUS, &cmd);
610 printk( KERN_DEBUG "pmc551: %sFast Back-to-Back\n", 623 printk(KERN_DEBUG "pmc551: Devsel %s\n",
611 ((PCI_COMMAND_FAST_BACK&cmd) == 0)?"Not ":"" ); 624 ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" :
612 625 ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" :
613 pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd ); 626 ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid");
614 printk( KERN_DEBUG "pmc551: EEPROM is under %s control\n" 627
615 "pmc551: System Control Register is %slocked to PCI access\n" 628 printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n",
616 "pmc551: System Control Register is %slocked to EEPROM access\n", 629 ((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : "");
617 (bcmd&0x1)?"software":"hardware", 630
618 (bcmd&0x20)?"":"un", (bcmd&0x40)?"":"un"); 631 pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd);
632 printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n"
633 "pmc551: System Control Register is %slocked to PCI access\n"
634 "pmc551: System Control Register is %slocked to EEPROM access\n",
635 (bcmd & 0x1) ? "software" : "hardware",
636 (bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? "" : "un");
619#endif 637#endif
620 return size; 638 return size;
621} 639}
622 640
623/* 641/*
624 * Kernel version specific module stuffages 642 * Kernel version specific module stuffages
625 */ 643 */
626 644
627
628MODULE_LICENSE("GPL"); 645MODULE_LICENSE("GPL");
629MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>"); 646MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
630MODULE_DESCRIPTION(PMC551_VERSION); 647MODULE_DESCRIPTION(PMC551_VERSION);
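For reference, the non-CONFIG_MTD_PMC551_BUGFIX branch reformatted above is the standard PCI BAR-sizing sequence; spelled out with comments (same calls as the hunk, no new symbols):

    pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg);    /* save the old BAR          */
    pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);     /* write all ones            */
    pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);   /* read back the size mask   */
    size &= PCI_BASE_ADDRESS_MEM_MASK;                        /* drop the flag bits        */
    size &= ~(size - 1);                                      /* lowest set bit = BAR size */
    pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg);     /* restore the old BAR       */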
@@ -632,11 +649,11 @@ MODULE_DESCRIPTION(PMC551_VERSION);
632/* 649/*
633 * Stuff these outside the ifdef so as to not bust compiled in driver support 650 * Stuff these outside the ifdef so as to not bust compiled in driver support
634 */ 651 */
635static int msize=0; 652static int msize = 0;
636#if defined(CONFIG_MTD_PMC551_APERTURE_SIZE) 653#if defined(CONFIG_MTD_PMC551_APERTURE_SIZE)
637static int asize=CONFIG_MTD_PMC551_APERTURE_SIZE 654static int asize = CONFIG_MTD_PMC551_APERTURE_SIZE
638#else 655#else
639static int asize=0; 656static int asize = 0;
640#endif 657#endif
641 658
642module_param(msize, int, 0); 659module_param(msize, int, 0);
@@ -649,164 +666,174 @@ MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
649 */ 666 */
650static int __init init_pmc551(void) 667static int __init init_pmc551(void)
651{ 668{
652 struct pci_dev *PCI_Device = NULL; 669 struct pci_dev *PCI_Device = NULL;
653 struct mypriv *priv; 670 struct mypriv *priv;
654 int count, found=0; 671 int count, found = 0;
655 struct mtd_info *mtd; 672 struct mtd_info *mtd;
656 u32 length = 0; 673 u32 length = 0;
657 674
658 if(msize) { 675 if (msize) {
659 msize = (1 << (ffs(msize) - 1))<<20; 676 msize = (1 << (ffs(msize) - 1)) << 20;
660 if (msize > (1<<30)) { 677 if (msize > (1 << 30)) {
661 printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", msize); 678 printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n",
679 msize);
662 return -EINVAL; 680 return -EINVAL;
663 } 681 }
664 } 682 }
665 683
666 if(asize) { 684 if (asize) {
667 asize = (1 << (ffs(asize) - 1))<<20; 685 asize = (1 << (ffs(asize) - 1)) << 20;
668 if (asize > (1<<30) ) { 686 if (asize > (1 << 30)) {
669 printk(KERN_NOTICE "pmc551: Invalid aperture size [%d]\n", asize); 687 printk(KERN_NOTICE "pmc551: Invalid aperture size "
688 "[%d]\n", asize);
670 return -EINVAL; 689 return -EINVAL;
671 } 690 }
672 } 691 }
673 692
674 printk(KERN_INFO PMC551_VERSION); 693 printk(KERN_INFO PMC551_VERSION);
675 694
676 /* 695 /*
677 * PCU-bus chipset probe. 696 * PCU-bus chipset probe.
678 */ 697 */
679 for( count = 0; count < MAX_MTD_DEVICES; count++ ) { 698 for (count = 0; count < MAX_MTD_DEVICES; count++) {
680 699
681 if ((PCI_Device = pci_find_device(PCI_VENDOR_ID_V3_SEMI, 700 if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
682 PCI_DEVICE_ID_V3_SEMI_V370PDC, 701 PCI_DEVICE_ID_V3_SEMI_V370PDC,
683 PCI_Device ) ) == NULL) { 702 PCI_Device)) == NULL) {
684 break; 703 break;
685 } 704 }
686 705
687 printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n", 706 printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n",
688 (unsigned long long)PCI_Device->resource[0].start); 707 (unsigned long long)pci_resource_start(PCI_Device, 0));
689 708
690 /* 709 /*
691 * The PMC551 device acts VERY weird if you don't init it 710 * The PMC551 device acts VERY weird if you don't init it
692 * first. i.e. it will not correctly report devsel. If for 711 * first. i.e. it will not correctly report devsel. If for
693 * some reason the sdram is in a write-protected state the 712 * some reason the sdram is in a write-protected state the
694 * device will DEVSEL when it is written to causing problems 713 * device will DEVSEL when it is written to causing problems
695 * with the oldproc.c driver in 714 * with the oldproc.c driver in
696 * some kernels (2.2.*) 715 * some kernels (2.2.*)
697 */ 716 */
698 if((length = fixup_pmc551(PCI_Device)) <= 0) { 717 if ((length = fixup_pmc551(PCI_Device)) <= 0) {
699 printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n"); 718 printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
700 break; 719 break;
701 } 720 }
702 721
703 /* 722 /*
704 * This is needed until the driver is capable of reading the 723 * This is needed until the driver is capable of reading the
705 * onboard I2C SROM to discover the "real" memory size. 724 * onboard I2C SROM to discover the "real" memory size.
706 */ 725 */
707 if(msize) { 726 if (msize) {
708 length = msize; 727 length = msize;
709 printk(KERN_NOTICE "pmc551: Using specified memory size 0x%x\n", length); 728 printk(KERN_NOTICE "pmc551: Using specified memory "
729 "size 0x%x\n", length);
710 } else { 730 } else {
711 msize = length; 731 msize = length;
712 } 732 }
713 733
714 mtd = kmalloc(sizeof(struct mtd_info), GFP_KERNEL); 734 mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
715 if (!mtd) { 735 if (!mtd) {
716 printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n"); 736 printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
717 break; 737 "device.\n");
718 } 738 break;
719 739 }
720 memset(mtd, 0, sizeof(struct mtd_info)); 740
721 741 priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL);
722 priv = kmalloc (sizeof(struct mypriv), GFP_KERNEL); 742 if (!priv) {
723 if (!priv) { 743 printk(KERN_NOTICE "pmc551: Cannot allocate new MTD "
724 printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n"); 744 "device.\n");
725 kfree(mtd); 745 kfree(mtd);
726 break; 746 break;
727 } 747 }
728 memset(priv, 0, sizeof(*priv)); 748 mtd->priv = priv;
729 mtd->priv = priv; 749 priv->dev = PCI_Device;
730 priv->dev = PCI_Device; 750
731 751 if (asize > length) {
732 if(asize > length) { 752 printk(KERN_NOTICE "pmc551: reducing aperture size to "
733 printk(KERN_NOTICE "pmc551: reducing aperture size to fit %dM\n",length>>20); 753 "fit %dM\n", length >> 20);
734 priv->asize = asize = length; 754 priv->asize = asize = length;
735 } else if (asize == 0 || asize == length) { 755 } else if (asize == 0 || asize == length) {
736 printk(KERN_NOTICE "pmc551: Using existing aperture size %dM\n", length>>20); 756 printk(KERN_NOTICE "pmc551: Using existing aperture "
757 "size %dM\n", length >> 20);
737 priv->asize = asize = length; 758 priv->asize = asize = length;
738 } else { 759 } else {
739 printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20); 760 printk(KERN_NOTICE "pmc551: Using specified aperture "
761 "size %dM\n", asize >> 20);
740 priv->asize = asize; 762 priv->asize = asize;
741 } 763 }
742 priv->start = ioremap(((PCI_Device->resource[0].start) 764 priv->start = pci_iomap(PCI_Device, 0, priv->asize);
743 & PCI_BASE_ADDRESS_MEM_MASK),
744 priv->asize);
745 765
746 if (!priv->start) { 766 if (!priv->start) {
747 printk(KERN_NOTICE "pmc551: Unable to map IO space\n"); 767 printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
748 kfree(mtd->priv); 768 kfree(mtd->priv);
749 kfree(mtd); 769 kfree(mtd);
750 break; 770 break;
751 } 771 }
752
753#ifdef CONFIG_MTD_PMC551_DEBUG 772#ifdef CONFIG_MTD_PMC551_DEBUG
754 printk( KERN_DEBUG "pmc551: setting aperture to %d\n", 773 printk(KERN_DEBUG "pmc551: setting aperture to %d\n",
755 ffs(priv->asize>>20)-1); 774 ffs(priv->asize >> 20) - 1);
756#endif 775#endif
757 776
758 priv->base_map0 = ( PMC551_PCI_MEM_MAP_REG_EN 777 priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN
759 | PMC551_PCI_MEM_MAP_ENABLE 778 | PMC551_PCI_MEM_MAP_ENABLE
760 | (ffs(priv->asize>>20)-1)<<4 ); 779 | (ffs(priv->asize >> 20) - 1) << 4);
761 priv->curr_map0 = priv->base_map0; 780 priv->curr_map0 = priv->base_map0;
762 pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0, 781 pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0,
763 priv->curr_map0 ); 782 priv->curr_map0);
764 783
765#ifdef CONFIG_MTD_PMC551_DEBUG 784#ifdef CONFIG_MTD_PMC551_DEBUG
766 printk( KERN_DEBUG "pmc551: aperture set to %d\n", 785 printk(KERN_DEBUG "pmc551: aperture set to %d\n",
767 (priv->base_map0 & 0xF0)>>4 ); 786 (priv->base_map0 & 0xF0) >> 4);
768#endif 787#endif
769 788
770 mtd->size = msize; 789 mtd->size = msize;
771 mtd->flags = MTD_CAP_RAM; 790 mtd->flags = MTD_CAP_RAM;
772 mtd->erase = pmc551_erase; 791 mtd->erase = pmc551_erase;
773 mtd->read = pmc551_read; 792 mtd->read = pmc551_read;
774 mtd->write = pmc551_write; 793 mtd->write = pmc551_write;
775 mtd->point = pmc551_point; 794 mtd->point = pmc551_point;
776 mtd->unpoint = pmc551_unpoint; 795 mtd->unpoint = pmc551_unpoint;
777 mtd->type = MTD_RAM; 796 mtd->type = MTD_RAM;
778 mtd->name = "PMC551 RAM board"; 797 mtd->name = "PMC551 RAM board";
779 mtd->erasesize = 0x10000; 798 mtd->erasesize = 0x10000;
780 mtd->writesize = 1; 799 mtd->writesize = 1;
781 mtd->owner = THIS_MODULE; 800 mtd->owner = THIS_MODULE;
782 801
783 if (add_mtd_device(mtd)) { 802 if (add_mtd_device(mtd)) {
784 printk(KERN_NOTICE "pmc551: Failed to register new device\n"); 803 printk(KERN_NOTICE "pmc551: Failed to register new "
785 iounmap(priv->start); 804 "device\n");
786 kfree(mtd->priv); 805 pci_iounmap(PCI_Device, priv->start);
787 kfree(mtd); 806 kfree(mtd->priv);
788 break; 807 kfree(mtd);
789 } 808 break;
790 printk(KERN_NOTICE "Registered pmc551 memory device.\n"); 809 }
791 printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n", 810
792 priv->asize>>20, 811 /* Keep a reference as the add_mtd_device worked */
793 priv->start, 812 pci_dev_get(PCI_Device);
794 priv->start + priv->asize); 813
795 printk(KERN_NOTICE "Total memory is %d%c\n", 814 printk(KERN_NOTICE "Registered pmc551 memory device.\n");
796 (length<1024)?length: 815 printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n",
797 (length<1048576)?length>>10:length>>20, 816 priv->asize >> 20,
798 (length<1024)?'B':(length<1048576)?'K':'M'); 817 priv->start, priv->start + priv->asize);
818 printk(KERN_NOTICE "Total memory is %d%c\n",
819 (length < 1024) ? length :
820 (length < 1048576) ? length >> 10 : length >> 20,
821 (length < 1024) ? 'B' : (length < 1048576) ? 'K' : 'M');
799 priv->nextpmc551 = pmc551list; 822 priv->nextpmc551 = pmc551list;
800 pmc551list = mtd; 823 pmc551list = mtd;
801 found++; 824 found++;
802 } 825 }
826
827 /* Exited early, reference left over */
828 if (PCI_Device)
829 pci_dev_put(PCI_Device);
803 830
804 if( !pmc551list ) { 831 if (!pmc551list) {
805 printk(KERN_NOTICE "pmc551: not detected\n"); 832 printk(KERN_NOTICE "pmc551: not detected\n");
806 return -ENODEV; 833 return -ENODEV;
807 } else { 834 } else {
808 printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found); 835 printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
809 return 0; 836 return 0;
810 } 837 }
811} 838}
812 839
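init_pmc551() above now iterates with pci_get_device() instead of pci_find_device(), which is why the patch adds a pci_dev_get() once a board is registered and a pci_dev_put() when the loop is left early. A minimal sketch of that contract (setup_one() is a placeholder, not a symbol from the driver):

    struct pci_dev *pdev = NULL;

    while ((pdev = pci_get_device(PCI_VENDOR_ID_V3_SEMI,
                                  PCI_DEVICE_ID_V3_SEMI_V370PDC, pdev))) {
            if (setup_one(pdev) == 0)
                    pci_dev_get(pdev);    /* driver keeps the pointer: take its own ref   */
            else
                    break;                /* leave the loop still holding the loop's ref  */
    }
    if (pdev)                             /* non-NULL only after an early break           */
            pci_dev_put(pdev);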
@@ -815,23 +842,24 @@ static int __init init_pmc551(void)
815 */ 842 */
816static void __exit cleanup_pmc551(void) 843static void __exit cleanup_pmc551(void)
817{ 844{
818 int found=0; 845 int found = 0;
819 struct mtd_info *mtd; 846 struct mtd_info *mtd;
820 struct mypriv *priv; 847 struct mypriv *priv;
821 848
822 while((mtd=pmc551list)) { 849 while ((mtd = pmc551list)) {
823 priv = mtd->priv; 850 priv = mtd->priv;
824 pmc551list = priv->nextpmc551; 851 pmc551list = priv->nextpmc551;
825 852
826 if(priv->start) { 853 if (priv->start) {
827 printk (KERN_DEBUG "pmc551: unmapping %dM starting at 0x%p\n", 854 printk(KERN_DEBUG "pmc551: unmapping %dM starting at "
828 priv->asize>>20, priv->start); 855 "0x%p\n", priv->asize >> 20, priv->start);
829 iounmap (priv->start); 856 pci_iounmap(priv->dev, priv->start);
830 } 857 }
858 pci_dev_put(priv->dev);
831 859
832 kfree (mtd->priv); 860 kfree(mtd->priv);
833 del_mtd_device (mtd); 861 del_mtd_device(mtd);
834 kfree (mtd); 862 kfree(mtd);
835 found++; 863 found++;
836 } 864 }
837 865
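The same file also switches its mapping from ioremap() on the raw BAR address to pci_iomap()/pci_iounmap(), as seen in the init and cleanup hunks above. A hedged sketch of the pairing (map_window() and unmap_window() are illustrative helpers, not driver symbols):

    /* pci_iomap() resolves BAR 0 itself; undo it with pci_iounmap(),
     * not plain iounmap(). */
    static void __iomem *map_window(struct pci_dev *pdev, unsigned long len)
    {
            return pci_iomap(pdev, 0, len);
    }

    static void unmap_window(struct pci_dev *pdev, void __iomem *start)
    {
            if (start)
                    pci_iounmap(pdev, start);
    }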
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 83d0b2a52527..24747bdc3e19 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -13,13 +13,13 @@ config MTD_COMPLEX_MAPPINGS
13 13
14config MTD_PHYSMAP 14config MTD_PHYSMAP
15 tristate "CFI Flash device in physical memory map" 15 tristate "CFI Flash device in physical memory map"
16 depends on MTD_CFI 16 depends on MTD_CFI || MTD_JEDECPROBE || MTD_ROM
17 help 17 help
18 This provides a 'mapping' driver which allows the CFI probe and 18 This provides a 'mapping' driver which allows the NOR Flash and
19 command set driver code to communicate with flash chips which 19 ROM driver code to communicate with chips which are mapped
20 are mapped physically into the CPU's memory. You will need to 20 physically into the CPU's memory. You will need to configure
21 configure the physical address and size of the flash chips on 21 the physical address and size of the flash chips on your
22 your particular board as well as the bus width, either statically 22 particular board as well as the bus width, either statically
23 with config options or at run-time. 23 with config options or at run-time.
24 24
25config MTD_PHYSMAP_START 25config MTD_PHYSMAP_START
@@ -447,14 +447,6 @@ config MTD_DC21285
447 21285 bridge used with Intel's StrongARM processors. More info at 447 21285 bridge used with Intel's StrongARM processors. More info at
448 <http://www.intel.com/design/bridge/docs/21285_documentation.htm>. 448 <http://www.intel.com/design/bridge/docs/21285_documentation.htm>.
449 449
450config MTD_IQ80310
451 tristate "CFI Flash device mapped on the XScale IQ80310 board"
452 depends on MTD_CFI && ARCH_IQ80310
453 help
454 This enables access routines for the flash chips on the Intel XScale
455 IQ80310 evaluation board. If you have one of these boards and would
456 like to use the flash chips on it, say 'Y'.
457
458config MTD_IXP4XX 450config MTD_IXP4XX
459 tristate "CFI Flash device mapped on Intel IXP4xx based systems" 451 tristate "CFI Flash device mapped on Intel IXP4xx based systems"
460 depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX 452 depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ab71f172eb77..191c1928bbec 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
15obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o 15obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
16obj-$(CONFIG_MTD_DC21285) += dc21285.o 16obj-$(CONFIG_MTD_DC21285) += dc21285.o
17obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o 17obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
18obj-$(CONFIG_MTD_IQ80310) += iq80310.o
19obj-$(CONFIG_MTD_L440GX) += l440gx.o 18obj-$(CONFIG_MTD_L440GX) += l440gx.o
20obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o 19obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
21obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o 20obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 447955be18af..797caffb20b1 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -57,6 +57,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window)
57 /* Disable writes through the rom window */ 57 /* Disable writes through the rom window */
58 pci_read_config_byte(window->pdev, 0x40, &byte); 58 pci_read_config_byte(window->pdev, 0x40, &byte);
59 pci_write_config_byte(window->pdev, 0x40, byte & ~1); 59 pci_write_config_byte(window->pdev, 0x40, byte & ~1);
60 pci_dev_put(window->pdev);
60 } 61 }
61 62
62 /* Free all of the mtd devices */ 63 /* Free all of the mtd devices */
@@ -91,7 +92,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
91 struct amd76xrom_map_info *map = NULL; 92 struct amd76xrom_map_info *map = NULL;
92 unsigned long map_top; 93 unsigned long map_top;
93 94
94 /* Remember the pci dev I find the window in */ 95 /* Remember the pci dev I find the window in - already have a ref */
95 window->pdev = pdev; 96 window->pdev = pdev;
96 97
97 /* Assume the rom window is properly set up, and find its size */ 98 /* Assume the rom window is properly set up, and find its size */
@@ -302,7 +303,7 @@ static int __init init_amd76xrom(void)
302 struct pci_device_id *id; 303 struct pci_device_id *id;
303 pdev = NULL; 304 pdev = NULL;
304 for(id = amd76xrom_pci_tbl; id->vendor; id++) { 305 for(id = amd76xrom_pci_tbl; id->vendor; id++) {
305 pdev = pci_find_device(id->vendor, id->device, NULL); 306 pdev = pci_get_device(id->vendor, id->device, NULL);
306 if (pdev) { 307 if (pdev) {
307 break; 308 break;
308 } 309 }
diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c
index d95ae582fbe9..642d96bc8919 100644
--- a/drivers/mtd/maps/arctic-mtd.c
+++ b/drivers/mtd/maps/arctic-mtd.c
@@ -96,6 +96,8 @@ static struct mtd_partition arctic_partitions[PARTITIONS] = {
96static int __init 96static int __init
97init_arctic_mtd(void) 97init_arctic_mtd(void)
98{ 98{
99 int err = 0;
100
99 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR); 101 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
100 102
101 arctic_mtd_map.virt = ioremap(PADDR, SIZE); 103 arctic_mtd_map.virt = ioremap(PADDR, SIZE);
@@ -109,12 +111,20 @@ init_arctic_mtd(void)
109 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8); 111 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
110 arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map); 112 arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
111 113
112 if (!arctic_mtd) 114 if (!arctic_mtd) {
115 iounmap((void *) arctic_mtd_map.virt);
113 return -ENXIO; 116 return -ENXIO;
117 }
114 118
115 arctic_mtd->owner = THIS_MODULE; 119 arctic_mtd->owner = THIS_MODULE;
116 120
117 return add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS); 121 err = add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
122 if (err) {
123 printk("%s: add_mtd_partitions failed\n", NAME);
124 iounmap((void *) arctic_mtd_map.virt);
125 }
126
127 return err;
118} 128}
119 129
120static void __exit 130static void __exit
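The arctic fix above (and the beech and ebony changes further down) all add the same unwinding: every failure after ioremap() must drop the mapping, including a failed add_mtd_partitions(). A generic sketch of the pattern; my_map, my_parts, NPARTS, PADDR and SIZE are placeholders standing in for each board's own definitions:

    static int __init init_board_mtd(void)
    {
            struct mtd_info *mtd;
            int err;

            my_map.virt = ioremap(PADDR, SIZE);
            if (!my_map.virt)
                    return -EIO;

            mtd = do_map_probe("cfi_probe", &my_map);
            if (!mtd) {
                    iounmap((void *)my_map.virt);   /* undo the mapping on probe failure */
                    return -ENXIO;
            }
            mtd->owner = THIS_MODULE;

            err = add_mtd_partitions(mtd, my_parts, NPARTS);
            if (err)
                    iounmap((void *)my_map.virt);   /* the path these fixes add          */
            return err;
    }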
diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c
index 5df7361d1407..a64b1a5ab316 100644
--- a/drivers/mtd/maps/beech-mtd.c
+++ b/drivers/mtd/maps/beech-mtd.c
@@ -72,6 +72,8 @@ static struct mtd_partition beech_partitions[2] = {
72static int __init 72static int __init
73init_beech_mtd(void) 73init_beech_mtd(void)
74{ 74{
75 int err = 0;
76
75 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR); 77 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
76 78
77 beech_mtd_map.virt = ioremap(PADDR, SIZE); 79 beech_mtd_map.virt = ioremap(PADDR, SIZE);
@@ -86,12 +88,20 @@ init_beech_mtd(void)
86 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8); 88 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
87 beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map); 89 beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
88 90
89 if (!beech_mtd) 91 if (!beech_mtd) {
92 iounmap((void *) beech_mtd_map.virt);
90 return -ENXIO; 93 return -ENXIO;
94 }
91 95
92 beech_mtd->owner = THIS_MODULE; 96 beech_mtd->owner = THIS_MODULE;
93 97
94 return add_mtd_partitions(beech_mtd, beech_partitions, 2); 98 err = add_mtd_partitions(beech_mtd, beech_partitions, 2);
99 if (err) {
100 printk("%s: add_mtd_partitions failed\n", NAME);
101 iounmap((void *) beech_mtd_map.virt);
102 }
103
104 return err;
95} 105}
96 106
97static void __exit 107static void __exit
diff --git a/drivers/mtd/maps/cstm_mips_ixx.c b/drivers/mtd/maps/cstm_mips_ixx.c
index aa56defb94c8..d6bef100d69a 100644
--- a/drivers/mtd/maps/cstm_mips_ixx.c
+++ b/drivers/mtd/maps/cstm_mips_ixx.c
@@ -171,7 +171,14 @@ int __init init_cstm_mips_ixx(void)
 		cstm_mips_ixx_map[i].phys = cstm_mips_ixx_board_desc[i].window_addr;
 		cstm_mips_ixx_map[i].virt = ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size);
 		if (!cstm_mips_ixx_map[i].virt) {
+			int j = 0;
 			printk(KERN_WARNING "Failed to ioremap\n");
+			for (j = 0; j < i; j++) {
+				if (cstm_mips_ixx_map[j].virt) {
+					iounmap((void *)cstm_mips_ixx_map[j].virt);
+					cstm_mips_ixx_map[j].virt = 0;
+				}
+			}
 			return -EIO;
 		}
 		cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name;
@@ -204,8 +211,15 @@ int __init init_cstm_mips_ixx(void)
 			cstm_mips_ixx_map[i].map_priv_2 = (unsigned long)mymtd;
 			add_mtd_partitions(mymtd, parts, cstm_mips_ixx_board_desc[i].num_partitions);
 		}
-		else
-			return -ENXIO;
+		else {
+			for (i = 0; i < PHYSMAP_NUMBER; i++) {
+				if (cstm_mips_ixx_map[i].virt) {
+					iounmap((void *)cstm_mips_ixx_map[i].virt);
+					cstm_mips_ixx_map[i].virt = 0;
+				}
+			}
+			return -ENXIO;
+		}
 	}
 	return 0;
 }
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
index 641e1dd8479e..1488bb92f26f 100644
--- a/drivers/mtd/maps/ebony.c
+++ b/drivers/mtd/maps/ebony.c
@@ -108,6 +108,7 @@ int __init init_ebony(void)
 				    ARRAY_SIZE(ebony_small_partitions));
 	} else {
 		printk("map probe failed for flash\n");
+		iounmap(ebony_small_map.virt);
 		return -ENXIO;
 	}
 
@@ -117,6 +118,7 @@ int __init init_ebony(void)
 
 	if (!ebony_large_map.virt) {
 		printk("Failed to ioremap flash\n");
+		iounmap(ebony_small_map.virt);
 		return -EIO;
 	}
 
@@ -129,6 +131,8 @@ int __init init_ebony(void)
 				    ARRAY_SIZE(ebony_large_partitions));
 	} else {
 		printk("map probe failed for flash\n");
+		iounmap(ebony_small_map.virt);
+		iounmap(ebony_large_map.virt);
 		return -ENXIO;
 	}
 
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index c6bf4e1219ef..7c50c271651c 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -218,8 +218,11 @@ int __init init_fortunet(void)
 			map_regions[ix].map_info.size);
 		if(!map_regions[ix].map_info.virt)
 		{
+			int j = 0;
 			printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
 				map_regions[ix].map_info.name);
+			for (j = 0 ; j < ix; j++)
+				iounmap(map_regions[j].map_info.virt);
 			return -ENXIO;
 		}
 		simple_map_init(&map_regions[ix].map_info);
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index db4b570d874a..2bb3e63606e5 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -61,6 +61,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
 	/* Disable writes through the rom window */
 	pci_read_config_word(window->pdev, BIOS_CNTL, &word);
 	pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+	pci_dev_put(window->pdev);
 
 	/* Free all of the mtd devices */
 	list_for_each_entry_safe(map, scratch, &window->maps, list) {
@@ -355,7 +356,7 @@ static int __init init_ichxrom(void)
 
 	pdev = NULL;
 	for (id = ichxrom_pci_tbl; id->vendor; id++) {
-		pdev = pci_find_device(id->vendor, id->device, NULL);
+		pdev = pci_get_device(id->vendor, id->device, NULL);
 		if (pdev) {
 			break;
 		}
diff --git a/drivers/mtd/maps/iq80310.c b/drivers/mtd/maps/iq80310.c
deleted file mode 100644
index 62d9e87d84e2..000000000000
--- a/drivers/mtd/maps/iq80310.c
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * $Id: iq80310.c,v 1.21 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * Mapping for the Intel XScale IQ80310 evaluation board
5 *
6 * Author: Nicolas Pitre
7 * Copyright: (C) 2001 MontaVista Software Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <asm/io.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23
24
25#define WINDOW_ADDR 0
26#define WINDOW_SIZE 8*1024*1024
27#define BUSWIDTH 1
28
29static struct mtd_info *mymtd;
30
31static struct map_info iq80310_map = {
32 .name = "IQ80310 flash",
33 .size = WINDOW_SIZE,
34 .bankwidth = BUSWIDTH,
35 .phys = WINDOW_ADDR
36};
37
38static struct mtd_partition iq80310_partitions[4] = {
39 {
40 .name = "Firmware",
41 .size = 0x00080000,
42 .offset = 0,
43 .mask_flags = MTD_WRITEABLE /* force read-only */
44 },{
45 .name = "Kernel",
46 .size = 0x000a0000,
47 .offset = 0x00080000,
48 },{
49 .name = "Filesystem",
50 .size = 0x00600000,
51 .offset = 0x00120000
52 },{
53 .name = "RedBoot",
54 .size = 0x000e0000,
55 .offset = 0x00720000,
56 .mask_flags = MTD_WRITEABLE
57 }
58};
59
60static struct mtd_info *mymtd;
61static struct mtd_partition *parsed_parts;
62static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
63
64static int __init init_iq80310(void)
65{
66 struct mtd_partition *parts;
67 int nb_parts = 0;
68 int parsed_nr_parts = 0;
69 int ret;
70
71 iq80310_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
72 if (!iq80310_map.virt) {
73 printk("Failed to ioremap\n");
74 return -EIO;
75 }
76 simple_map_init(&iq80310_map);
77
78 mymtd = do_map_probe("cfi_probe", &iq80310_map);
79 if (!mymtd) {
80 iounmap((void *)iq80310_map.virt);
81 return -ENXIO;
82 }
83 mymtd->owner = THIS_MODULE;
84
85 ret = parse_mtd_partitions(mymtd, probes, &parsed_parts, 0);
86
87 if (ret > 0)
88 parsed_nr_parts = ret;
89
90 if (parsed_nr_parts > 0) {
91 parts = parsed_parts;
92 nb_parts = parsed_nr_parts;
93 } else {
94 parts = iq80310_partitions;
95 nb_parts = ARRAY_SIZE(iq80310_partitions);
96 }
97 add_mtd_partitions(mymtd, parts, nb_parts);
98 return 0;
99}
100
101static void __exit cleanup_iq80310(void)
102{
103 if (mymtd) {
104 del_mtd_partitions(mymtd);
105 map_destroy(mymtd);
106 kfree(parsed_parts);
107 }
108 if (iq80310_map.virt)
109 iounmap((void *)iq80310_map.virt);
110}
111
112module_init(init_iq80310);
113module_exit(cleanup_iq80310);
114
115
116MODULE_LICENSE("GPL");
117MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
118MODULE_DESCRIPTION("MTD map driver for Intel XScale IQ80310 evaluation board");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 986c58628390..7a828e3e6446 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -253,7 +253,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
253 /* Use the fast version */ 253 /* Use the fast version */
254 info->map.write = ixp4xx_write16, 254 info->map.write = ixp4xx_write16,
255 255
256 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); 256 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, dev->resource->start);
257 if (err > 0) { 257 if (err > 0) {
258 err = add_mtd_partitions(info->mtd, info->partitions, err); 258 err = add_mtd_partitions(info->mtd, info->partitions, err);
259 if(err) 259 if(err)
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 6b784ef5ee70..67620adf4811 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -61,14 +61,17 @@ static int __init init_l440gx(void)
61 struct resource *pm_iobase; 61 struct resource *pm_iobase;
62 __u16 word; 62 __u16 word;
63 63
64 dev = pci_find_device(PCI_VENDOR_ID_INTEL, 64 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
65 PCI_DEVICE_ID_INTEL_82371AB_0, NULL); 65 PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
66 66
67 pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL, 67 pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
68 PCI_DEVICE_ID_INTEL_82371AB_3, NULL); 68 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
69 69
70 pci_dev_put(dev);
71
70 if (!dev || !pm_dev) { 72 if (!dev || !pm_dev) {
71 printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n"); 73 printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
74 pci_dev_put(pm_dev);
72 return -ENODEV; 75 return -ENODEV;
73 } 76 }
74 77
@@ -76,6 +79,7 @@ static int __init init_l440gx(void)
76 79
77 if (!l440gx_map.virt) { 80 if (!l440gx_map.virt) {
78 printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); 81 printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
82 pci_dev_put(pm_dev);
79 return -ENOMEM; 83 return -ENOMEM;
80 } 84 }
81 simple_map_init(&l440gx_map); 85 simple_map_init(&l440gx_map);
@@ -99,8 +103,12 @@ static int __init init_l440gx(void)
99 pm_iobase->start += iobase & ~1; 103 pm_iobase->start += iobase & ~1;
100 pm_iobase->end += iobase & ~1; 104 pm_iobase->end += iobase & ~1;
101 105
106 pci_dev_put(pm_dev);
107
102 /* Allocate the resource region */ 108 /* Allocate the resource region */
103 if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) { 109 if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
110 pci_dev_put(dev);
111 pci_dev_put(pm_dev);
104 printk(KERN_WARNING "Could not allocate pm iobase resource\n"); 112 printk(KERN_WARNING "Could not allocate pm iobase resource\n");
105 iounmap(l440gx_map.virt); 113 iounmap(l440gx_map.virt);
106 return -ENXIO; 114 return -ENXIO;
diff --git a/drivers/mtd/maps/lasat.c b/drivers/mtd/maps/lasat.c
index 1c13d2dc0cdf..e34376321050 100644
--- a/drivers/mtd/maps/lasat.c
+++ b/drivers/mtd/maps/lasat.c
@@ -79,6 +79,7 @@ static int __init init_lasat(void)
79 return 0; 79 return 0;
80 } 80 }
81 81
82 iounmap(lasat_map.virt);
82 return -ENXIO; 83 return -ENXIO;
83} 84}
84 85
@@ -89,6 +90,7 @@ static void __exit cleanup_lasat(void)
89 map_destroy(lasat_mtd); 90 map_destroy(lasat_mtd);
90 } 91 }
91 if (lasat_map.virt) { 92 if (lasat_map.virt) {
93 iounmap(lasat_map.virt);
92 lasat_map.virt = 0; 94 lasat_map.virt = 0;
93 } 95 }
94} 96}
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 0994b5b2e331..198e840ff6db 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -277,6 +277,7 @@ int __init nettel_init(void)
277 nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize); 277 nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
278 if (!nettel_amd_map.virt) { 278 if (!nettel_amd_map.virt) {
279 printk("SNAPGEAR: failed to ioremap() BOOTCS\n"); 279 printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
280 iounmap(nettel_mmcrp);
280 return(-EIO); 281 return(-EIO);
281 } 282 }
282 simple_map_init(&nettel_amd_map); 283 simple_map_init(&nettel_amd_map);
@@ -337,7 +338,8 @@ int __init nettel_init(void)
337 nettel_amd_map.virt = NULL; 338 nettel_amd_map.virt = NULL;
338#else 339#else
339 /* Only AMD flash supported */ 340 /* Only AMD flash supported */
340 return(-ENXIO); 341 rc = -ENXIO;
342 goto out_unmap2;
341#endif 343#endif
342 } 344 }
343 345
@@ -361,14 +363,15 @@ int __init nettel_init(void)
361 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); 363 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
362 if (!nettel_intel_map.virt) { 364 if (!nettel_intel_map.virt) {
363 printk("SNAPGEAR: failed to ioremap() ROMCS1\n"); 365 printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
364 return(-EIO); 366 rc = -EIO;
367 goto out_unmap2;
365 } 368 }
366 simple_map_init(&nettel_intel_map); 369 simple_map_init(&nettel_intel_map);
367 370
368 intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map); 371 intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
369 if (!intel_mtd) { 372 if (!intel_mtd) {
370 iounmap(nettel_intel_map.virt); 373 rc = -ENXIO;
371 return(-ENXIO); 374 goto out_unmap1;
372 } 375 }
373 376
374 /* Set PAR to the detected size */ 377 /* Set PAR to the detected size */
@@ -394,13 +397,14 @@ int __init nettel_init(void)
394 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); 397 nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
395 if (!nettel_intel_map.virt) { 398 if (!nettel_intel_map.virt) {
396 printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n"); 399 printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
397 return(-EIO); 400 rc = -EIO;
401 goto out_unmap2;
398 } 402 }
399 403
400 intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map); 404 intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
401 if (! intel_mtd) { 405 if (! intel_mtd) {
402 iounmap((void *) nettel_intel_map.virt); 406 rc = -ENXIO;
403 return(-ENXIO); 407 goto out_unmap1;
404 } 408 }
405 409
406 intel1size = intel_mtd->size - intel0size; 410 intel1size = intel_mtd->size - intel0size;
@@ -456,6 +460,18 @@ int __init nettel_init(void)
456#endif 460#endif
457 461
458 return(rc); 462 return(rc);
463
464#ifdef CONFIG_MTD_CFI_INTELEXT
465out_unmap1:
466 iounmap((void *) nettel_intel_map.virt);
467#endif
468
469out_unmap2:
470 iounmap(nettel_mmcrp);
471 iounmap(nettel_amd_map.virt);
472
473 return(rc);
474
459} 475}
460 476
461/****************************************************************************/ 477/****************************************************************************/
@@ -469,6 +485,10 @@ void __exit nettel_cleanup(void)
469 del_mtd_partitions(amd_mtd); 485 del_mtd_partitions(amd_mtd);
470 map_destroy(amd_mtd); 486 map_destroy(amd_mtd);
471 } 487 }
488 if (nettel_mmcrp) {
489 iounmap(nettel_mmcrp);
490 nettel_mmcrp = NULL;
491 }
472 if (nettel_amd_map.virt) { 492 if (nettel_amd_map.virt) {
473 iounmap(nettel_amd_map.virt); 493 iounmap(nettel_amd_map.virt);
474 nettel_amd_map.virt = NULL; 494 nettel_amd_map.virt = NULL;
diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c
index 2f07602ba940..5522eac8c980 100644
--- a/drivers/mtd/maps/ocotea.c
+++ b/drivers/mtd/maps/ocotea.c
@@ -97,6 +97,7 @@ int __init init_ocotea(void)
97 ARRAY_SIZE(ocotea_small_partitions)); 97 ARRAY_SIZE(ocotea_small_partitions));
98 } else { 98 } else {
99 printk("map probe failed for flash\n"); 99 printk("map probe failed for flash\n");
100 iounmap(ocotea_small_map.virt);
100 return -ENXIO; 101 return -ENXIO;
101 } 102 }
102 103
@@ -106,6 +107,7 @@ int __init init_ocotea(void)
106 107
107 if (!ocotea_large_map.virt) { 108 if (!ocotea_large_map.virt) {
108 printk("Failed to ioremap flash\n"); 109 printk("Failed to ioremap flash\n");
110 iounmap(ocotea_small_map.virt);
109 return -EIO; 111 return -EIO;
110 } 112 }
111 113
@@ -118,6 +120,8 @@ int __init init_ocotea(void)
118 ARRAY_SIZE(ocotea_large_partitions)); 120 ARRAY_SIZE(ocotea_large_partitions));
119 } else { 121 } else {
120 printk("map probe failed for flash\n"); 122 printk("map probe failed for flash\n");
123 iounmap(ocotea_small_map.virt);
124 iounmap(ocotea_large_map.virt);
121 return -ENXIO; 125 return -ENXIO;
122 } 126 }
123 127
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index c861134cbc48..995347b1beba 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -602,6 +602,10 @@ static int pcmciamtd_config(struct pcmcia_device *link)
602 ret = pcmcia_request_configuration(link, &link->conf); 602 ret = pcmcia_request_configuration(link, &link->conf);
603 if(ret != CS_SUCCESS) { 603 if(ret != CS_SUCCESS) {
604 cs_error(link, RequestConfiguration, ret); 604 cs_error(link, RequestConfiguration, ret);
605 if (dev->win_base) {
606 iounmap(dev->win_base);
607 dev->win_base = NULL;
608 }
605 return -ENODEV; 609 return -ENODEV;
606 } 610 }
607 611
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 7799a25a7f2a..bc7cc71788bc 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -158,9 +158,42 @@ err_out:
158 return err; 158 return err;
159} 159}
160 160
161#ifdef CONFIG_PM
162static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
163{
164 struct physmap_flash_info *info = platform_get_drvdata(dev);
165 int ret = 0;
166
167 if (info)
168 ret = info->mtd->suspend(info->mtd);
169
170 return ret;
171}
172
173static int physmap_flash_resume(struct platform_device *dev)
174{
175 struct physmap_flash_info *info = platform_get_drvdata(dev);
176 if (info)
177 info->mtd->resume(info->mtd);
178 return 0;
179}
180
181static void physmap_flash_shutdown(struct platform_device *dev)
182{
183 struct physmap_flash_info *info = platform_get_drvdata(dev);
184 if (info && info->mtd->suspend(info->mtd) == 0)
185 info->mtd->resume(info->mtd);
186}
187#endif
188
161static struct platform_driver physmap_flash_driver = { 189static struct platform_driver physmap_flash_driver = {
162 .probe = physmap_flash_probe, 190 .probe = physmap_flash_probe,
163 .remove = physmap_flash_remove, 191 .remove = physmap_flash_remove,
192#ifdef CONFIG_PM
193 .suspend = physmap_flash_suspend,
194 .resume = physmap_flash_resume,
195 .shutdown = physmap_flash_shutdown,
196#endif
164 .driver = { 197 .driver = {
165 .name = "physmap-flash", 198 .name = "physmap-flash",
166 }, 199 },
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index ec8fdae1dd99..2257d2b500c0 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -126,6 +126,8 @@ static struct mtd_info *redwood_mtd;
126 126
127int __init init_redwood_flash(void) 127int __init init_redwood_flash(void)
128{ 128{
129 int err = 0;
130
129 printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n", 131 printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n",
130 WINDOW_SIZE, WINDOW_ADDR); 132 WINDOW_SIZE, WINDOW_ADDR);
131 133
@@ -141,11 +143,18 @@ int __init init_redwood_flash(void)
141 143
142 if (redwood_mtd) { 144 if (redwood_mtd) {
143 redwood_mtd->owner = THIS_MODULE; 145 redwood_mtd->owner = THIS_MODULE;
144 return add_mtd_partitions(redwood_mtd, 146 err = add_mtd_partitions(redwood_mtd,
145 redwood_flash_partitions, 147 redwood_flash_partitions,
146 NUM_REDWOOD_FLASH_PARTITIONS); 148 NUM_REDWOOD_FLASH_PARTITIONS);
149 if (err) {
150 printk("init_redwood_flash: add_mtd_partitions failed\n");
151 iounmap(redwood_flash_map.virt);
152 }
153 return err;
154
147 } 155 }
148 156
157 iounmap(redwood_flash_map.virt);
149 return -ENXIO; 158 return -ENXIO;
150} 159}
151 160
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index 7d0fcf8f4f33..b8c1331b7a04 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -156,7 +156,7 @@ int __init init_sbc8240_mtd (void)
156 }; 156 };
157 157
158 int devicesfound = 0; 158 int devicesfound = 0;
159 int i; 159 int i,j;
160 160
161 for (i = 0; i < NUM_FLASH_BANKS; i++) { 161 for (i = 0; i < NUM_FLASH_BANKS; i++) {
162 printk (KERN_NOTICE MSG_PREFIX 162 printk (KERN_NOTICE MSG_PREFIX
@@ -166,6 +166,10 @@ int __init init_sbc8240_mtd (void)
166 (unsigned long) ioremap (pt[i].addr, pt[i].size); 166 (unsigned long) ioremap (pt[i].addr, pt[i].size);
167 if (!sbc8240_map[i].map_priv_1) { 167 if (!sbc8240_map[i].map_priv_1) {
168 printk (MSG_PREFIX "failed to ioremap\n"); 168 printk (MSG_PREFIX "failed to ioremap\n");
169 for (j = 0; j < i; j++) {
170 iounmap((void *) sbc8240_map[j].map_priv_1);
171 sbc8240_map[j].map_priv_1 = 0;
172 }
169 return -EIO; 173 return -EIO;
170 } 174 }
171 simple_map_init(&sbc8240_mtd[i]); 175 simple_map_init(&sbc8240_mtd[i]);
@@ -175,6 +179,11 @@ int __init init_sbc8240_mtd (void)
175 if (sbc8240_mtd[i]) { 179 if (sbc8240_mtd[i]) {
176 sbc8240_mtd[i]->module = THIS_MODULE; 180 sbc8240_mtd[i]->module = THIS_MODULE;
177 devicesfound++; 181 devicesfound++;
182 } else {
183 if (sbc8240_map[i].map_priv_1) {
184 iounmap((void *) sbc8240_map[i].map_priv_1);
185 sbc8240_map[i].map_priv_1 = 0;
186 }
178 } 187 }
179 } 188 }
180 189
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 7391fd544e86..5e2bce22f37c 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -87,19 +87,23 @@ static int __init init_scx200_docflash(void)
87 87
88 printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n"); 88 printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
89 89
90 if ((bridge = pci_find_device(PCI_VENDOR_ID_NS, 90 if ((bridge = pci_get_device(PCI_VENDOR_ID_NS,
91 PCI_DEVICE_ID_NS_SCx200_BRIDGE, 91 PCI_DEVICE_ID_NS_SCx200_BRIDGE,
92 NULL)) == NULL) 92 NULL)) == NULL)
93 return -ENODEV; 93 return -ENODEV;
94 94
95 /* check that we have found the configuration block */ 95 /* check that we have found the configuration block */
96 if (!scx200_cb_present()) 96 if (!scx200_cb_present()) {
97 pci_dev_put(bridge);
97 return -ENODEV; 98 return -ENODEV;
99 }
98 100
99 if (probe) { 101 if (probe) {
100 /* Try to use the present flash mapping if any */ 102 /* Try to use the present flash mapping if any */
101 pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base); 103 pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
102 pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl); 104 pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
105 pci_dev_put(bridge);
106
103 pmr = inl(scx200_cb_base + SCx200_PMR); 107 pmr = inl(scx200_cb_base + SCx200_PMR);
104 108
105 if (base == 0 109 if (base == 0
@@ -127,6 +131,7 @@ static int __init init_scx200_docflash(void)
127 return -ENOMEM; 131 return -ENOMEM;
128 } 132 }
129 } else { 133 } else {
134 pci_dev_put(bridge);
130 for (u = size; u > 1; u >>= 1) 135 for (u = size; u > 1; u >>= 1)
131 ; 136 ;
132 if (u != 1) { 137 if (u != 1) {
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
index ec80eec376bf..ca932122fb64 100644
--- a/drivers/mtd/maps/walnut.c
+++ b/drivers/mtd/maps/walnut.c
@@ -68,6 +68,7 @@ int __init init_walnut(void)
68 68
69 if (WALNUT_FLASH_ONBD_N(fpga_brds1)) { 69 if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
70 printk("The on-board flash is disabled (U79 sw 5)!"); 70 printk("The on-board flash is disabled (U79 sw 5)!");
71 iounmap(fpga_status_adr);
71 return -EIO; 72 return -EIO;
72 } 73 }
73 if (WALNUT_FLASH_SRAM_SEL(fpga_brds1)) 74 if (WALNUT_FLASH_SRAM_SEL(fpga_brds1))
@@ -81,6 +82,7 @@ int __init init_walnut(void)
81 82
82 if (!walnut_map.virt) { 83 if (!walnut_map.virt) {
83 printk("Failed to ioremap flash.\n"); 84 printk("Failed to ioremap flash.\n");
85 iounmap(fpga_status_adr);
84 return -EIO; 86 return -EIO;
85 } 87 }
86 88
@@ -93,9 +95,11 @@ int __init init_walnut(void)
93 ARRAY_SIZE(walnut_partitions)); 95 ARRAY_SIZE(walnut_partitions));
94 } else { 96 } else {
95 printk("map probe failed for flash\n"); 97 printk("map probe failed for flash\n");
98 iounmap(fpga_status_adr);
96 return -ENXIO; 99 return -ENXIO;
97 } 100 }
98 101
102 iounmap(fpga_status_adr);
99 return 0; 103 return 0;
100} 104}
101 105
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index fb8b4f7e48d3..5b6acfcb2b88 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -62,15 +62,12 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
 	struct mtd_info *mtd = mfi->mtd;
 
 	switch (orig) {
-	case 0:
-		/* SEEK_SET */
+	case SEEK_SET:
 		break;
-	case 1:
-		/* SEEK_CUR */
+	case SEEK_CUR:
 		offset += file->f_pos;
 		break;
-	case 2:
-		/* SEEK_END */
+	case SEEK_END:
 		offset += mtd->size;
 		break;
 	default:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 168d3ba063c3..c4d26de74349 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -57,6 +57,16 @@ int add_mtd_device(struct mtd_info *mtd)
 			mtd->index = i;
 			mtd->usecount = 0;
 
+			/* Some chips always power up locked. Unlock them now */
+			if ((mtd->flags & MTD_WRITEABLE)
+			    && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+				if (mtd->unlock(mtd, 0, mtd->size))
+					printk(KERN_WARNING
+					       "%s: unlock failed, "
+					       "writes may not work\n",
+					       mtd->name);
+			}
+
 			DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
 			/* No need to get a refcount on the module containing
 			   the notifier, since we hold the mtd_table_mutex */
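
The block added to add_mtd_device() above auto-unlocks chips that power up write-protected: it only runs when the registering driver marks the device writeable, sets MTD_STUPID_LOCK and provides an unlock() method. A rough sketch of how a chip or map driver would opt in (my_unlock is a placeholder, not part of this patch):

	mtd->flags |= MTD_WRITEABLE | MTD_STUPID_LOCK;
	mtd->unlock = my_unlock;	/* called by the core as mtd->unlock(mtd, 0, mtd->size) */
	add_mtd_device(mtd);
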
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3db77eec0ed2..c99302ed3823 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -11,7 +11,7 @@ config MTD_NAND
11 help 11 help
12 This enables support for accessing all type of NAND flash 12 This enables support for accessing all type of NAND flash
13 devices. For further information see 13 devices. For further information see
14 <http://www.linux-mtd.infradead.org/tech/nand.html>. 14 <http://www.linux-mtd.infradead.org/doc/nand.html>.
15 15
16config MTD_NAND_VERIFY_WRITE 16config MTD_NAND_VERIFY_WRITE
17 bool "Verify NAND page writes" 17 bool "Verify NAND page writes"
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 31228334da12..09e421a96893 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -21,18 +21,7 @@
21#include <linux/version.h> 21#include <linux/version.h>
22#include <asm/io.h> 22#include <asm/io.h>
23 23
24/* fixme: this is ugly */
25#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
26#include <asm/mach-au1x00/au1xxx.h> 24#include <asm/mach-au1x00/au1xxx.h>
27#else
28#include <asm/au1000.h>
29#ifdef CONFIG_MIPS_PB1550
30#include <asm/pb1550.h>
31#endif
32#ifdef CONFIG_MIPS_DB1550
33#include <asm/db1x00.h>
34#endif
35#endif
36 25
37/* 26/*
38 * MTD structure for NAND controller 27 * MTD structure for NAND controller
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 516c0e5e564c..12017f3c6bd6 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -198,6 +198,9 @@ static void __exit ep7312_cleanup(void)
198 /* Release resources, unregister device */ 198 /* Release resources, unregister device */
199 nand_release(ap7312_mtd); 199 nand_release(ap7312_mtd);
200 200
201 /* Release io resource */
202 iounmap((void *)this->IO_ADDR_R);
203
201 /* Free the MTD device structure */ 204 /* Free the MTD device structure */
202 kfree(ep7312_mtd); 205 kfree(ep7312_mtd);
203} 206}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 0a54d003ef34..975b2ef61121 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2224,7 +2224,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 	}
 
 	/* Try to identify manufacturer */
-	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_id++) {
+	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
 		if (nand_manuf_ids[maf_idx].id == *maf_id)
 			break;
 	}
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index e5bd88f2d560..039c759cfbfc 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -168,7 +168,7 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd)
 	chip->ecc.mode = NAND_ECC_HW;
 	chip->ecc.size = 256;
 	chip->ecc.bytes = 3;
-	chip->ecclayout = mtd->pl_chip->ecclayout;
+	chip->ecclayout = chip->ecc.layout = mtd->pl_chip->ecclayout;
 	mtd->mtd.priv = chip;
 	mtd->mtd.owner = THIS_MODULE;
 }
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 22fa65c12ab9..eb7d4d443deb 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -276,6 +276,7 @@ static int __init ppchameleonevb_init(void)
276 /* Scan to find existence of the device (it could not be mounted) */ 276 /* Scan to find existence of the device (it could not be mounted) */
277 if (nand_scan(ppchameleon_mtd, 1)) { 277 if (nand_scan(ppchameleon_mtd, 1)) {
278 iounmap((void *)ppchameleon_fio_base); 278 iounmap((void *)ppchameleon_fio_base);
279 ppchameleon_fio_base = NULL;
279 kfree(ppchameleon_mtd); 280 kfree(ppchameleon_mtd);
280 goto nand_evb_init; 281 goto nand_evb_init;
281 } 282 }
@@ -314,6 +315,8 @@ static int __init ppchameleonevb_init(void)
314 ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 315 ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
315 if (!ppchameleonevb_mtd) { 316 if (!ppchameleonevb_mtd) {
316 printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n"); 317 printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
318 if (ppchameleon_fio_base)
319 iounmap(ppchameleon_fio_base);
317 return -ENOMEM; 320 return -ENOMEM;
318 } 321 }
319 322
@@ -322,6 +325,8 @@ static int __init ppchameleonevb_init(void)
322 if (!ppchameleonevb_fio_base) { 325 if (!ppchameleonevb_fio_base) {
323 printk("ioremap PPChameleonEVB NAND flash failed\n"); 326 printk("ioremap PPChameleonEVB NAND flash failed\n");
324 kfree(ppchameleonevb_mtd); 327 kfree(ppchameleonevb_mtd);
328 if (ppchameleon_fio_base)
329 iounmap(ppchameleon_fio_base);
325 return -EIO; 330 return -EIO;
326 } 331 }
327 332
@@ -378,6 +383,8 @@ static int __init ppchameleonevb_init(void)
378 if (nand_scan(ppchameleonevb_mtd, 1)) { 383 if (nand_scan(ppchameleonevb_mtd, 1)) {
379 iounmap((void *)ppchameleonevb_fio_base); 384 iounmap((void *)ppchameleonevb_fio_base);
380 kfree(ppchameleonevb_mtd); 385 kfree(ppchameleonevb_mtd);
386 if (ppchameleon_fio_base)
387 iounmap(ppchameleon_fio_base);
381 return -ENXIO; 388 return -ENXIO;
382 } 389 }
383#ifdef CONFIG_MTD_PARTITIONS 390#ifdef CONFIG_MTD_PARTITIONS
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index fbeedc3184e9..51c7288ab49a 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -78,7 +78,7 @@ static struct mtd_partition sharpsl_nand_default_partition_info[] = {
 /*
  * hardware specific access to control-lines
  *	ctrl:
- * NAND_CNE: bit 0 -> bit 0 & 4
+ * NAND_CNE: bit 0 -> ! bit 0 & 4
  * NAND_CLE: bit 1 -> bit 1
  * NAND_ALE: bit 2 -> bit 2
  *
@@ -92,7 +92,10 @@ static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
 		unsigned char bits = ctrl & 0x07;
 
 		bits |= (ctrl & 0x01) << 4;
-		writeb((readb(FLASHCTL) & 0x17) | bits, FLASHCTL);
+
+		bits ^= 0x11;
+
+		writeb((readb(FLASHCTL) & ~0x17) | bits, FLASHCTL);
 	}
 
 	if (cmd != NAND_CMD_NONE)
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
new file mode 100644
index 000000000000..79d3bb659bfe
--- /dev/null
+++ b/drivers/mtd/ssfdc.c
@@ -0,0 +1,474 @@
1/*
2 * Linux driver for SSFDC Flash Translation Layer (Read only)
3 * (c) 2005 Eptar srl
4 * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
5 *
6 * Based on NTFL and MTDBLOCK_RO drivers
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/hdreg.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h>
20#include <linux/mtd/blktrans.h>
21
22struct ssfdcr_record {
23 struct mtd_blktrans_dev mbd;
24 int usecount;
25 unsigned char heads;
26 unsigned char sectors;
27 unsigned short cylinders;
28 int cis_block; /* block n. containing CIS/IDI */
29 int erase_size; /* phys_block_size */
30 unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on
31 the 128MiB) */
32 int map_len; /* n. phys_blocks on the card */
33};
34
35#define SSFDCR_MAJOR 257
36#define SSFDCR_PARTN_BITS 3
37
38#define SECTOR_SIZE 512
39#define SECTOR_SHIFT 9
40#define OOB_SIZE 16
41
42#define MAX_LOGIC_BLK_PER_ZONE 1000
43#define MAX_PHYS_BLK_PER_ZONE 1024
44
45#define KiB(x) ( (x) * 1024L )
46#define MiB(x) ( KiB(x) * 1024L )
47
48/** CHS Table
49 1MiB 2MiB 4MiB 8MiB 16MiB 32MiB 64MiB 128MiB
50NCylinder 125 125 250 250 500 500 500 500
51NHead 4 4 4 4 4 8 8 16
52NSector 4 8 8 16 16 16 32 32
53SumSector 2,000 4,000 8,000 16,000 32,000 64,000 128,000 256,000
54SectorSize 512 512 512 512 512 512 512 512
55**/
56
57typedef struct {
58 unsigned long size;
59 unsigned short cyl;
60 unsigned char head;
61 unsigned char sec;
62} chs_entry_t;
63
64/* Must be ordered by size */
65static const chs_entry_t chs_table[] = {
66 { MiB( 1), 125, 4, 4 },
67 { MiB( 2), 125, 4, 8 },
68 { MiB( 4), 250, 4, 8 },
69 { MiB( 8), 250, 4, 16 },
70 { MiB( 16), 500, 4, 16 },
71 { MiB( 32), 500, 8, 16 },
72 { MiB( 64), 500, 8, 32 },
73 { MiB(128), 500, 16, 32 },
74 { 0 },
75};
76
77static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head,
78 unsigned char *sec)
79{
80 int k;
81 int found = 0;
82
83 k = 0;
84 while (chs_table[k].size > 0 && size > chs_table[k].size)
85 k++;
86
87 if (chs_table[k].size > 0) {
88 if (cyl)
89 *cyl = chs_table[k].cyl;
90 if (head)
91 *head = chs_table[k].head;
92 if (sec)
93 *sec = chs_table[k].sec;
94 found = 1;
95 }
96
97 return found;
98}
99
100/* These bytes are the signature for the CIS/IDI sector */
101static const uint8_t cis_numbers[] = {
102 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
103};
104
105/* Read and check for a valid CIS sector */
106static int get_valid_cis_sector(struct mtd_info *mtd)
107{
108 int ret, k, cis_sector;
109 size_t retlen;
110 loff_t offset;
111 uint8_t *sect_buf;
112
113 cis_sector = -1;
114
115 sect_buf = kmalloc(SECTOR_SIZE, GFP_KERNEL);
116 if (!sect_buf)
117 goto out;
118
119 /*
120 * Look for CIS/IDI sector on the first GOOD block (give up after 4 bad
121 * blocks). If the first good block doesn't contain CIS number the flash
122 * is not SSFDC formatted
123 */
124 for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
125 if (!mtd->block_isbad(mtd, offset)) {
126 ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
127 sect_buf);
128
129 /* CIS pattern match on the sector buffer */
130 if (ret < 0 || retlen != SECTOR_SIZE) {
131 printk(KERN_WARNING
132 "SSFDC_RO:can't read CIS/IDI sector\n");
133 } else if (!memcmp(sect_buf, cis_numbers,
134 sizeof(cis_numbers))) {
135 /* Found */
136 cis_sector = (int)(offset >> SECTOR_SHIFT);
137 } else {
138 DEBUG(MTD_DEBUG_LEVEL1,
139 "SSFDC_RO: CIS/IDI sector not found"
140 " on %s (mtd%d)\n", mtd->name,
141 mtd->index);
142 }
143 break;
144 }
145 }
146
147 kfree(sect_buf);
148 out:
149 return cis_sector;
150}
151
152/* Read physical sector (wrapper to MTD_READ) */
153static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
154 int sect_no)
155{
156 int ret;
157 size_t retlen;
158 loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
159
160 ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
161 if (ret < 0 || retlen != SECTOR_SIZE)
162 return -1;
163
164 return 0;
165}
166
167/* Read redundancy area (wrapper to MTD_READ_OOB */
168static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
169{
170 struct mtd_oob_ops ops;
171 int ret;
172
173 ops.mode = MTD_OOB_RAW;
174 ops.ooboffs = 0;
175 ops.ooblen = mtd->oobsize;
176 ops.len = OOB_SIZE;
177 ops.oobbuf = buf;
178 ops.datbuf = NULL;
179
180 ret = mtd->read_oob(mtd, offs, &ops);
181 if (ret < 0 || ops.retlen != OOB_SIZE)
182 return -1;
183
184 return 0;
185}
186
187/* Parity calculator on a word of n bit size */
188static int get_parity(int number, int size)
189{
190 int k;
191 int parity;
192
193 parity = 1;
194 for (k = 0; k < size; k++) {
195 parity += (number >> k);
196 parity &= 1;
197 }
198 return parity;
199}
200
201/* Read and validate the logical block address field stored in the OOB */
202static int get_logical_address(uint8_t *oob_buf)
203{
204 int block_address, parity;
205 int offset[2] = {6, 11}; /* offset of the 2 address fields within OOB */
206 int j;
207 int ok = 0;
208
209 /*
210 * Look for the first valid logical address
211 * Valid address has fixed pattern on most significant bits and
212 * parity check
213 */
214 for (j = 0; j < ARRAY_SIZE(offset); j++) {
215 block_address = ((int)oob_buf[offset[j]] << 8) |
216 oob_buf[offset[j]+1];
217
218 /* Check for the signature bits in the address field (MSBits) */
219 if ((block_address & ~0x7FF) == 0x1000) {
220 parity = block_address & 0x01;
221 block_address &= 0x7FF;
222 block_address >>= 1;
223
224 if (get_parity(block_address, 10) != parity) {
225 DEBUG(MTD_DEBUG_LEVEL0,
226 "SSFDC_RO: logical address field%d"
227 "parity error(0x%04X)\n", j+1,
228 block_address);
229 } else {
230 ok = 1;
231 break;
232 }
233 }
234 }
235
236 if (!ok)
237 block_address = -2;
238
239 DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
240 block_address);
241
242 return block_address;
243}
244
245/* Build the logic block map */
246static int build_logical_block_map(struct ssfdcr_record *ssfdc)
247{
248 unsigned long offset;
249 uint8_t oob_buf[OOB_SIZE];
250 int ret, block_address, phys_block;
251 struct mtd_info *mtd = ssfdc->mbd.mtd;
252
253 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
254 ssfdc->map_len,
255 (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
256
257 /* Scan every physical block, skip CIS block */
258 for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
259 phys_block++) {
260 offset = (unsigned long)phys_block * ssfdc->erase_size;
261 if (mtd->block_isbad(mtd, offset))
262 continue; /* skip bad blocks */
263
264 ret = read_raw_oob(mtd, offset, oob_buf);
265 if (ret < 0) {
266 DEBUG(MTD_DEBUG_LEVEL0,
267 "SSFDC_RO: mtd read_oob() failed at %lu\n",
268 offset);
269 return -1;
270 }
271 block_address = get_logical_address(oob_buf);
272
273 /* Skip invalid addresses */
274 if (block_address >= 0 &&
275 block_address < MAX_LOGIC_BLK_PER_ZONE) {
276 int zone_index;
277
278 zone_index = phys_block / MAX_PHYS_BLK_PER_ZONE;
279 block_address += zone_index * MAX_LOGIC_BLK_PER_ZONE;
280 ssfdc->logic_block_map[block_address] =
281 (unsigned short)phys_block;
282
283 DEBUG(MTD_DEBUG_LEVEL2,
284 "SSFDC_RO: build_block_map() phys_block=%d,"
285 "logic_block_addr=%d, zone=%d\n",
286 phys_block, block_address, zone_index);
287 }
288 }
289 return 0;
290}
291
292static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
293{
294 struct ssfdcr_record *ssfdc;
295 int cis_sector;
296
297 /* Check for small page NAND flash */
298 if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE)
299 return;
300
301 /* Check for SSDFC format by reading CIS/IDI sector */
302 cis_sector = get_valid_cis_sector(mtd);
303 if (cis_sector == -1)
304 return;
305
306 ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
307 if (!ssfdc) {
308 printk(KERN_WARNING
309 "SSFDC_RO: out of memory for data structures\n");
310 return;
311 }
312
313 ssfdc->mbd.mtd = mtd;
314 ssfdc->mbd.devnum = -1;
315 ssfdc->mbd.blksize = SECTOR_SIZE;
316 ssfdc->mbd.tr = tr;
317 ssfdc->mbd.readonly = 1;
318
319 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);
320 ssfdc->erase_size = mtd->erasesize;
321 ssfdc->map_len = mtd->size / mtd->erasesize;
322
323 DEBUG(MTD_DEBUG_LEVEL1,
324 "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
325 ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
326 (ssfdc->map_len + MAX_PHYS_BLK_PER_ZONE - 1) /
327 MAX_PHYS_BLK_PER_ZONE);
328
329 /* Set geometry */
330 ssfdc->heads = 16;
331 ssfdc->sectors = 32;
332 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
333 ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) /
334 ((long)ssfdc->sectors * (long)ssfdc->heads));
335
336 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
337 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
338 (long)ssfdc->cylinders * (long)ssfdc->heads *
339 (long)ssfdc->sectors);
340
341 ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders *
342 (long)ssfdc->sectors;
343
344 /* Allocate logical block map */
345 ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
346 ssfdc->map_len, GFP_KERNEL);
347 if (!ssfdc->logic_block_map) {
348 printk(KERN_WARNING
349 "SSFDC_RO: out of memory for data structures\n");
350 goto out_err;
351 }
352 memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
353 ssfdc->map_len);
354
355 /* Build logical block map */
356 if (build_logical_block_map(ssfdc) < 0)
357 goto out_err;
358
359 /* Register device + partitions */
360 if (add_mtd_blktrans_dev(&ssfdc->mbd))
361 goto out_err;
362
363 printk(KERN_INFO "SSFDC_RO: Found ssfdc%c on mtd%d (%s)\n",
364 ssfdc->mbd.devnum + 'a', mtd->index, mtd->name);
365 return;
366
367out_err:
368 kfree(ssfdc->logic_block_map);
369 kfree(ssfdc);
370}
371
372static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev)
373{
374 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
375
376 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
377
378 del_mtd_blktrans_dev(dev);
379 kfree(ssfdc->logic_block_map);
380 kfree(ssfdc);
381}
382
383static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
384 unsigned long logic_sect_no, char *buf)
385{
386 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
387 int sectors_per_block, offset, block_address;
388
389 sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;
390 offset = (int)(logic_sect_no % sectors_per_block);
391 block_address = (int)(logic_sect_no / sectors_per_block);
392
393 DEBUG(MTD_DEBUG_LEVEL3,
394 "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
395 " block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
396 block_address);
397
398 if (block_address >= ssfdc->map_len)
399 BUG();
400
401 block_address = ssfdc->logic_block_map[block_address];
402
403 DEBUG(MTD_DEBUG_LEVEL3,
404 "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
405 block_address);
406
407 if (block_address < 0xffff) {
408 unsigned long sect_no;
409
410 sect_no = (unsigned long)block_address * sectors_per_block +
411 offset;
412
413 DEBUG(MTD_DEBUG_LEVEL3,
414 "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
415 sect_no);
416
417 if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
418 return -EIO;
419 } else {
420 memset(buf, 0xff, SECTOR_SIZE);
421 }
422
423 return 0;
424}
425
426static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
427{
428 struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
429
430 DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
431 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
432
433 geo->heads = ssfdc->heads;
434 geo->sectors = ssfdc->sectors;
435 geo->cylinders = ssfdc->cylinders;
436
437 return 0;
438}
439
440/****************************************************************************
441 *
442 * Module stuff
443 *
444 ****************************************************************************/
445
446static struct mtd_blktrans_ops ssfdcr_tr = {
447 .name = "ssfdc",
448 .major = SSFDCR_MAJOR,
449 .part_bits = SSFDCR_PARTN_BITS,
450 .getgeo = ssfdcr_getgeo,
451 .readsect = ssfdcr_readsect,
452 .add_mtd = ssfdcr_add_mtd,
453 .remove_dev = ssfdcr_remove_dev,
454 .owner = THIS_MODULE,
455};
456
457static int __init init_ssfdcr(void)
458{
459 printk(KERN_INFO "SSFDC read-only Flash Translation layer\n");
460
461 return register_mtd_blktrans(&ssfdcr_tr);
462}
463
464static void __exit cleanup_ssfdcr(void)
465{
466 deregister_mtd_blktrans(&ssfdcr_tr);
467}
468
469module_init(init_ssfdcr);
470module_exit(cleanup_ssfdcr);
471
472MODULE_LICENSE("GPL");
473MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
474MODULE_DESCRIPTION("Flash Translation Layer for read-only SSFDC SmartMedia card");
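
Taken together, ssfdcr_readsect() above resolves a read in two steps: split the logical sector into (logical block, offset) using sectors_per_block = erase_size >> SECTOR_SHIFT, then look the block up in logic_block_map[], which build_logical_block_map() filled in from the OOB address fields. A worked example, assuming a 16 KiB erase block (32 sectors per block) purely for illustration, since the driver does not fix the erase size:

	int sectors_per_block = 16384 >> 9;		/* 32 sectors per physical block */
	int offset        = 70 % sectors_per_block;	/* logical sector 70 -> offset 6 */
	int logical_block = 70 / sectors_per_block;	/* -> logical block 2 */
	/* if logic_block_map[2] == 57, the physical sector read is 57 * 32 + 6 = 1830 */
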
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80e8ca013e44..29dede2eaa85 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2077,7 +2077,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	vp->tx_ring[entry].next = 0;
 #if DO_ZEROCOPY
-	if (skb->ip_summed != CHECKSUM_HW)
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 	else
 		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
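
This and the following network-driver hunks are a mechanical rename tracking the split of the old CHECKSUM_HW value: on transmit, skb->ip_summed == CHECKSUM_PARTIAL means the stack left the checksum for the hardware to finish, so the tests guarding TX checksum offload become CHECKSUM_PARTIAL; on receive, CHECKSUM_COMPLETE means the NIC handed back a full checksum of the packet in skb->csum, so the RX-path assignments become CHECKSUM_COMPLETE. A compressed sketch of both sides (the descriptor flag and hw_csum variable are placeholders, not from any of these drivers):

	/* TX: ask the NIC to finish the checksum the stack started. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		desc_flags |= EXAMPLE_TX_CSUM;

	/* RX: the NIC returned a full ones-complement sum of the payload. */
	skb->csum = hw_csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
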
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 1428bb7715af..a48b211c489d 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -813,7 +813,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
813 813
814 if (mss) 814 if (mss)
815 flags |= LargeSend | ((mss & MSSMask) << MSSShift); 815 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
816 else if (skb->ip_summed == CHECKSUM_HW) { 816 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
817 const struct iphdr *ip = skb->nh.iph; 817 const struct iphdr *ip = skb->nh.iph;
818 if (ip->protocol == IPPROTO_TCP) 818 if (ip->protocol == IPPROTO_TCP)
819 flags |= IPCS | TCPCS; 819 flags |= IPCS | TCPCS;
@@ -867,7 +867,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
867 if (mss) 867 if (mss)
868 ctrl |= LargeSend | 868 ctrl |= LargeSend |
869 ((mss & MSSMask) << MSSShift); 869 ((mss & MSSMask) << MSSShift);
870 else if (skb->ip_summed == CHECKSUM_HW) { 870 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
871 if (ip->protocol == IPPROTO_TCP) 871 if (ip->protocol == IPPROTO_TCP)
872 ctrl |= IPCS | TCPCS; 872 ctrl |= IPCS | TCPCS;
873 else if (ip->protocol == IPPROTO_UDP) 873 else if (ip->protocol == IPPROTO_UDP)
@@ -898,7 +898,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
898 txd->addr = cpu_to_le64(first_mapping); 898 txd->addr = cpu_to_le64(first_mapping);
899 wmb(); 899 wmb();
900 900
901 if (skb->ip_summed == CHECKSUM_HW) { 901 if (skb->ip_summed == CHECKSUM_PARTIAL) {
902 if (ip->protocol == IPPROTO_TCP) 902 if (ip->protocol == IPPROTO_TCP)
903 txd->opts1 = cpu_to_le32(first_eor | first_len | 903 txd->opts1 = cpu_to_le32(first_eor | first_len |
904 FirstFrag | DescOwn | 904 FirstFrag | DescOwn |
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 1c01e9b3d07c..826548644d7b 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2040,7 +2040,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2040 */ 2040 */
2041 if (bd_flags & BD_FLG_TCP_UDP_SUM) { 2041 if (bd_flags & BD_FLG_TCP_UDP_SUM) {
2042 skb->csum = htons(csum); 2042 skb->csum = htons(csum);
2043 skb->ip_summed = CHECKSUM_HW; 2043 skb->ip_summed = CHECKSUM_COMPLETE;
2044 } else { 2044 } else {
2045 skb->ip_summed = CHECKSUM_NONE; 2045 skb->ip_summed = CHECKSUM_NONE;
2046 } 2046 }
@@ -2511,7 +2511,7 @@ restart:
2511 2511
2512 mapping = ace_map_tx_skb(ap, skb, skb, idx); 2512 mapping = ace_map_tx_skb(ap, skb, skb, idx);
2513 flagsize = (skb->len << 16) | (BD_FLG_END); 2513 flagsize = (skb->len << 16) | (BD_FLG_END);
2514 if (skb->ip_summed == CHECKSUM_HW) 2514 if (skb->ip_summed == CHECKSUM_PARTIAL)
2515 flagsize |= BD_FLG_TCP_UDP_SUM; 2515 flagsize |= BD_FLG_TCP_UDP_SUM;
2516#if ACENIC_DO_VLAN 2516#if ACENIC_DO_VLAN
2517 if (vlan_tx_tag_present(skb)) { 2517 if (vlan_tx_tag_present(skb)) {
@@ -2534,7 +2534,7 @@ restart:
2534 2534
2535 mapping = ace_map_tx_skb(ap, skb, NULL, idx); 2535 mapping = ace_map_tx_skb(ap, skb, NULL, idx);
2536 flagsize = (skb_headlen(skb) << 16); 2536 flagsize = (skb_headlen(skb) << 16);
2537 if (skb->ip_summed == CHECKSUM_HW) 2537 if (skb->ip_summed == CHECKSUM_PARTIAL)
2538 flagsize |= BD_FLG_TCP_UDP_SUM; 2538 flagsize |= BD_FLG_TCP_UDP_SUM;
2539#if ACENIC_DO_VLAN 2539#if ACENIC_DO_VLAN
2540 if (vlan_tx_tag_present(skb)) { 2540 if (vlan_tx_tag_present(skb)) {
@@ -2560,7 +2560,7 @@ restart:
2560 PCI_DMA_TODEVICE); 2560 PCI_DMA_TODEVICE);
2561 2561
2562 flagsize = (frag->size << 16); 2562 flagsize = (frag->size << 16);
2563 if (skb->ip_summed == CHECKSUM_HW) 2563 if (skb->ip_summed == CHECKSUM_PARTIAL)
2564 flagsize |= BD_FLG_TCP_UDP_SUM; 2564 flagsize |= BD_FLG_TCP_UDP_SUM;
2565 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2565 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
2566 2566
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 979a33df0a8c..96d8a694d433 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -161,6 +161,7 @@ static struct pci_device_id com20020pci_id_table[] = {
161 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 161 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
162 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 162 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
163 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 163 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
164 { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
164 { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 165 { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
165 {0,} 166 {0,}
166}; 167};
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 6fad83f24c4f..711609665632 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1264,7 +1264,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1264{ 1264{
1265 int j, rev, ret; 1265 int j, rev, ret;
1266 struct bmac_data *bp; 1266 struct bmac_data *bp;
1267 unsigned char *addr; 1267 const unsigned char *prop_addr;
1268 unsigned char addr[6];
1268 struct net_device *dev; 1269 struct net_device *dev;
1269 int is_bmac_plus = ((int)match->data) != 0; 1270 int is_bmac_plus = ((int)match->data) != 0;
1270 1271
@@ -1272,14 +1273,16 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1272 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); 1273 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
1273 return -ENODEV; 1274 return -ENODEV;
1274 } 1275 }
1275 addr = get_property(macio_get_of_node(mdev), "mac-address", NULL); 1276 prop_addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
1276 if (addr == NULL) { 1277 if (prop_addr == NULL) {
1277 addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL); 1278 prop_addr = get_property(macio_get_of_node(mdev),
1278 if (addr == NULL) { 1279 "local-mac-address", NULL);
1280 if (prop_addr == NULL) {
1279 printk(KERN_ERR "BMAC: Can't get mac-address\n"); 1281 printk(KERN_ERR "BMAC: Can't get mac-address\n");
1280 return -ENODEV; 1282 return -ENODEV;
1281 } 1283 }
1282 } 1284 }
1285 memcpy(addr, prop_addr, sizeof(addr));
1283 1286
1284 dev = alloc_etherdev(PRIV_BYTES); 1287 dev = alloc_etherdev(PRIV_BYTES);
1285 if (!dev) { 1288 if (!dev) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 652eb05a6c2d..7857b4630124 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4423,7 +4423,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4423 ring_prod = TX_RING_IDX(prod); 4423 ring_prod = TX_RING_IDX(prod);
4424 4424
4425 vlan_tag_flags = 0; 4425 vlan_tag_flags = 0;
4426 if (skb->ip_summed == CHECKSUM_HW) { 4426 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4427 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 4427 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4428 } 4428 }
4429 4429
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index a31544ccb3c4..558fdb8ad2dc 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2167,7 +2167,7 @@ end_copy_pkt:
2167 cas_page_unmap(addr); 2167 cas_page_unmap(addr);
2168 } 2168 }
2169 skb->csum = ntohs(i ^ 0xffff); 2169 skb->csum = ntohs(i ^ 0xffff);
2170 skb->ip_summed = CHECKSUM_HW; 2170 skb->ip_summed = CHECKSUM_COMPLETE;
2171 skb->protocol = eth_type_trans(skb, cp->dev); 2171 skb->protocol = eth_type_trans(skb, cp->dev);
2172 return len; 2172 return len;
2173} 2173}
@@ -2821,7 +2821,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2821 } 2821 }
2822 2822
2823 ctrl = 0; 2823 ctrl = 0;
2824 if (skb->ip_summed == CHECKSUM_HW) { 2824 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2825 u64 csum_start_off, csum_stuff_off; 2825 u64 csum_start_off, csum_stuff_off;
2826 2826
2827 csum_start_off = (u64) (skb->h.raw - skb->data); 2827 csum_start_off = (u64) (skb->h.raw - skb->data);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 61b3754f50ff..ddd0bdb498f4 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1470,9 +1470,9 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1470 } 1470 }
1471 1471
1472 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1472 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
1473 skb->ip_summed == CHECKSUM_HW && 1473 skb->ip_summed == CHECKSUM_PARTIAL &&
1474 skb->nh.iph->protocol == IPPROTO_UDP) 1474 skb->nh.iph->protocol == IPPROTO_UDP)
1475 if (unlikely(skb_checksum_help(skb, 0))) { 1475 if (unlikely(skb_checksum_help(skb))) {
1476 dev_kfree_skb_any(skb); 1476 dev_kfree_skb_any(skb);
1477 return NETDEV_TX_OK; 1477 return NETDEV_TX_OK;
1478 } 1478 }
@@ -1495,11 +1495,11 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); 1495 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
1496 cpl->opcode = CPL_TX_PKT; 1496 cpl->opcode = CPL_TX_PKT;
1497 cpl->ip_csum_dis = 1; /* SW calculates IP csum */ 1497 cpl->ip_csum_dis = 1; /* SW calculates IP csum */
1498 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1; 1498 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
1499 /* the length field isn't used so don't bother setting it */ 1499 /* the length field isn't used so don't bother setting it */
1500 1500
1501 st->tx_cso += (skb->ip_summed == CHECKSUM_HW); 1501 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
1502 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW); 1502 sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_PARTIAL);
1503 sge->stats.tx_reg_pkts++; 1503 sge->stats.tx_reg_pkts++;
1504 } 1504 }
1505 cpl->iff = dev->if_port; 1505 cpl->iff = dev->if_port;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 402961e68c89..b74e67654764 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -611,7 +611,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
611 txdesc = &np->tx_ring[entry]; 611 txdesc = &np->tx_ring[entry];
612 612
613#if 0 613#if 0
614 if (skb->ip_summed == CHECKSUM_HW) { 614 if (skb->ip_summed == CHECKSUM_PARTIAL) {
615 txdesc->status |= 615 txdesc->status |=
616 cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable | 616 cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
617 IPChecksumEnable); 617 IPChecksumEnable);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 98ef9f85482f..2ab9f96f5dab 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2600,7 +2600,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 	unsigned int i;
 	uint8_t css;
 
-	if (likely(skb->ip_summed == CHECKSUM_HW)) {
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		css = skb->h.raw - skb->data;
 
 		i = tx_ring->next_to_use;
@@ -2927,11 +2927,11 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	/* reserve a descriptor for the offload context */
-	if ((mss) || (skb->ip_summed == CHECKSUM_HW))
+	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
 		count++;
 	count++;
 #else
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		count++;
 #endif
 
@@ -3608,7 +3608,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
 		 */
 		csum = ntohl(csum ^ 0xFFFF);
 		skb->csum = csum;
-		skb->ip_summed = CHECKSUM_HW;
+		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 	adapter->hw_csum_good++;
 }
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd5..32cacf115f75 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1503,7 +1503,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
 	else
 #endif
-	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
+			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
 
 	/* vlan tag */
 	if (np->vlangrp && vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ebbbd6ca6204..ba960913c034 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -947,7 +947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Set up checksumming */
 	if (likely((dev->features & NETIF_F_IP_CSUM)
-			&& (CHECKSUM_HW == skb->ip_summed))) {
+			&& (CHECKSUM_PARTIAL == skb->ip_summed))) {
 		fcb = gfar_add_fcb(skb, txbdp);
 		status |= TXBD_TOE;
 		gfar_tx_checksum(skb, fcb);
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 409c6aab0411..763373ae9666 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1648,7 +1648,7 @@ static int hamachi_rx(struct net_device *dev)
 				 * could do the pseudo myself and return
 				 * CHECKSUM_UNNECESSARY
 				 */
-				skb->ip_summed = CHECKSUM_HW;
+				skb->ip_summed = CHECKSUM_COMPLETE;
 			}
 		}
 	}
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 82468e2dc799..57e214d85e9a 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1036,7 +1036,7 @@ static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
 				struct sk_buff *skb)
 {
 #if defined(CONFIG_IBM_EMAC_TAH)
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		++dev->stats.tx_packets_csum;
 		return EMAC_TX_CTRL_TAH_CSUM;
 	}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 0464e78f733a..e56eac88b809 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -702,7 +702,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 					     desc[3].desc,
 					     desc[4].desc,
 					     desc[5].desc,
-					     correlator);
+					     correlator,
+					     &correlator);
 	} while ((lpar_rc == H_BUSY) && (retry_count--));
 
 	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 8385bf836507..f5b25bff1540 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -41,16 +41,6 @@
 #define IbmVethMcastRemoveFilter	0x2UL
 #define IbmVethMcastClearFilterTable	0x3UL
 
-/* hcall numbers */
-#define H_VIO_SIGNAL			0x104
-#define H_REGISTER_LOGICAL_LAN		0x114
-#define H_FREE_LOGICAL_LAN		0x118
-#define H_ADD_LOGICAL_LAN_BUFFER	0x11C
-#define H_SEND_LOGICAL_LAN		0x120
-#define H_MULTICAST_CTRL		0x130
-#define H_CHANGE_LOGICAL_LAN_MAC	0x14C
-#define H_FREE_LOGICAL_LAN_BUFFER	0x1D4
-
 /* hcall macros */
 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
   plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
@@ -61,8 +51,21 @@
 #define h_add_logical_lan_buffer(ua, buf) \
   plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
 
-#define h_send_logical_lan(ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator) \
-  plpar_hcall_8arg_2ret(H_SEND_LOGICAL_LAN, ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator, &correlator)
+static inline long h_send_logical_lan(unsigned long unit_address,
+		unsigned long desc1, unsigned long desc2, unsigned long desc3,
+		unsigned long desc4, unsigned long desc5, unsigned long desc6,
+		unsigned long corellator_in, unsigned long *corellator_out)
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
+			desc2, desc3, desc4, desc5, desc6, corellator_in);
+
+	*corellator_out = retbuf[0];
+
+	return rc;
+}
 
 #define h_multicast_ctrl(ua, cmd, mac) \
   plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)
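With the old 8-argument hcall macro replaced by the inline wrapper above, callers pass the correlator in by value and receive the updated value back through a pointer, which is what the ibmveth transmit hunk earlier in this series now does. A sketch of such a call site; the surrounding variable names are illustrative, not taken verbatim from the driver:

/* Illustrative call site; 'adapter', 'desc' and 'retry_count' are placeholders. */
unsigned long correlator = 0;
long lpar_rc;

do {
	lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
				     desc[0].desc, desc[1].desc, desc[2].desc,
				     desc[3].desc, desc[4].desc, desc[5].desc,
				     correlator, &correlator);
} while ((lpar_rc == H_BUSY) && (retry_count--));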
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 68d8af7df08e..65f897ddb920 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1387,7 +1387,7 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * MAC header which should not be summed and the TCP/UDP pseudo headers
 	 * manually.
 	 */
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int proto = ntohs(skb->nh.iph->protocol);
 		unsigned int csoff;
 		struct iphdr *ih = skb->nh.iph;
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index e3c8cd5eca67..68d4c418cb98 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -249,7 +249,7 @@ static void __exit ali_ircc_cleanup(void)
 
 	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
 
-	for (i=0; i < 4; i++) {
+	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
 		if (dev_self[i])
 			ali_ircc_close(dev_self[i]);
 	}
@@ -273,6 +273,12 @@ static int ali_ircc_open(int i, chipio_t *info)
 	int err;
 
 	IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
+
+	if (i >= ARRAY_SIZE(dev_self)) {
+		IRDA_ERROR("%s(), maximum number of supported chips reached!\n",
+			   __FUNCTION__);
+		return -ENOMEM;
+	}
 
 	/* Set FIR FIFO and DMA Threshold */
 	if ((ali_ircc_setup(info)) == -1)
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 44efd49bf4a9..ba4f3eb988b3 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -1090,7 +1090,7 @@ static int __init irport_init(void)
 {
 	int i;
 
-	for (i=0; (io[i] < 2000) && (i < 4); i++) {
+	for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
 		if (irport_open(i, io[i], irq[i]) != NULL)
 			return 0;
 	}
@@ -1112,7 +1112,7 @@ static void __exit irport_cleanup(void)
 
 	IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
 
-	for (i=0; i < 4; i++) {
+	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
 		if (dev_self[i])
 			irport_close(dev_self[i]);
 	}
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 8bafb455c102..79b85f327500 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -279,7 +279,7 @@ static void via_ircc_clean(void)
 
 	IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
 
-	for (i=0; i < 4; i++) {
+	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
 		if (dev_self[i])
 			via_ircc_close(dev_self[i]);
 	}
@@ -327,6 +327,9 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
 
 	IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
 
+	if (i >= ARRAY_SIZE(dev_self))
+		return -ENOMEM;
+
 	/* Allocate new instance of the driver */
 	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
 	if (dev == NULL)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 0ea65c4c6f85..8421597072a7 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -117,7 +117,7 @@ static int __init w83977af_init(void)
 
 	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
 
-	for (i=0; (io[i] < 2000) && (i < 4); i++) {
+	for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) {
 		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
 			return 0;
 	}
@@ -136,7 +136,7 @@ static void __exit w83977af_cleanup(void)
 
 	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
 
-	for (i=0; i < 4; i++) {
+	for (i=0; i < ARRAY_SIZE(dev_self); i++) {
 		if (dev_self[i])
 			w83977af_close(dev_self[i]);
 	}
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7bbd447289b5..9405b44f3214 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1232,7 +1232,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 	unsigned int i;
 	uint8_t css, cso;
 
-	if(likely(skb->ip_summed == CHECKSUM_HW)) {
+	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		css = skb->h.raw - skb->data;
 		cso = (skb->h.raw + skb->csum) - skb->data;
 
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index b783a6984abc..393aba95cf12 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -442,16 +442,16 @@ init_rx_bufs(struct net_device *dev, int num) {
 		if (rbd) {
 			rbd->pad = 0;
 			rbd->count = 0;
-			rbd->skb = dev_alloc_skb(RX_SKB_SIZE);
+			rbd->skb = dev_alloc_skb(RX_SKBSIZE);
 			if (!rbd->skb) {
 				printk("dev_alloc_skb failed");
 			}
 			rbd->next = rfd->rbd;
 			if (i) {
 				rfd->rbd->prev = rbd;
-				rbd->size = RX_SKB_SIZE;
+				rbd->size = RX_SKBSIZE;
 			} else {
-				rbd->size = (RX_SKB_SIZE | RBD_EL);
+				rbd->size = (RX_SKBSIZE | RBD_EL);
 				lp->rbd_tail = rbd;
 			}
 
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 29e4b5aa6ead..5d80e0e6a8e9 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -113,7 +113,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
 	struct device_node *mace = macio_get_of_node(mdev);
 	struct net_device *dev;
 	struct mace_data *mp;
-	unsigned char *addr;
+	const unsigned char *addr;
 	int j, rev, rc = -EBUSY;
 
 	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index eeab1df5bef3..d4dcc856b3cd 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -385,7 +385,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 	struct pkt_info pkt_info;
 
 	while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
-		dma_unmap_single(NULL, pkt_info.buf_ptr, RX_SKB_SIZE,
+		dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
 							DMA_FROM_DEVICE);
 		mp->rx_desc_count--;
 		received_packets++;
@@ -1147,7 +1147,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
 	desc->byte_cnt = length;
 	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
 
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		BUG_ON(skb->protocol != ETH_P_IP);
 
 		cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 9bdd43ab3573..9f16681d0e7e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -930,7 +930,7 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
 	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
 	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
 		skb->csum = hw_csum;
-		skb->ip_summed = CHECKSUM_HW;
+		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 }
 
@@ -973,7 +973,7 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 		if ((skb->protocol == ntohs(ETH_P_IP)) ||
 		    (skb->protocol == ntohs(ETH_P_IPV6))) {
 			skb->csum = ntohs((u16) csum);
-			skb->ip_summed = CHECKSUM_HW;
+			skb->ip_summed = CHECKSUM_COMPLETE;
 		} else
 			myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
 	}
@@ -1897,13 +1897,13 @@ again:
 	pseudo_hdr_offset = 0;
 	odd_flag = 0;
 	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
-	if (likely(skb->ip_summed == CHECKSUM_HW)) {
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		cksum_offset = (skb->h.raw - skb->data);
 		pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
 		/* If the headers are excessively large, then we must
 		 * fall back to a software checksum */
 		if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
-			if (skb_checksum_help(skb, 0))
+			if (skb_checksum_help(skb))
 				goto drop;
 			cksum_offset = 0;
 			pseudo_hdr_offset = 0;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 0e76859c90a2..5143f5dbb2e5 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1153,7 +1153,7 @@ again:
 	if (!nr_frags)
 		frag = NULL;
 	extsts = 0;
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		extsts |= EXTSTS_IPPKT;
 		if (IPPROTO_TCP == skb->nh.iph->protocol)
 			extsts |= EXTSTS_TCPPKT;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 51ff9a9d1bb5..f3655fd772f5 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -43,6 +43,7 @@
  * deprecated in 2.6
  */
 
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/version.h>
@@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
 MODULE_VERSION("1.0.2");
 
-static void
+static unsigned int
 setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
 {
 	sg[0].page = virt_to_page(address);
 	sg[0].offset = offset_in_page(address);
 	sg[0].length = length;
+	return length;
 }
 
 #define SHA1_PAD_SIZE 40
@@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
  * State for an MPPE (de)compressor.
  */
 struct ppp_mppe_state {
-	struct crypto_tfm *arc4;
-	struct crypto_tfm *sha1;
+	struct crypto_blkcipher *arc4;
+	struct crypto_hash *sha1;
 	unsigned char *sha1_digest;
 	unsigned char master_key[MPPE_MAX_KEY_LEN];
 	unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -136,14 +138,21 @@ struct ppp_mppe_state {
  */
 static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
 {
+	struct hash_desc desc;
 	struct scatterlist sg[4];
+	unsigned int nbytes;
 
-	setup_sg(&sg[0], state->master_key, state->keylen);
-	setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1));
-	setup_sg(&sg[2], state->session_key, state->keylen);
-	setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2));
+	nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
+	nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
+			   sizeof(sha_pad->sha_pad1));
+	nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
+	nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
+			   sizeof(sha_pad->sha_pad2));
 
-	crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest);
+	desc.tfm = state->sha1;
+	desc.flags = 0;
+
+	crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
 
 	memcpy(InterimKey, state->sha1_digest, state->keylen);
 }
@@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 {
 	unsigned char InterimKey[MPPE_MAX_KEY_LEN];
 	struct scatterlist sg_in[1], sg_out[1];
+	struct blkcipher_desc desc = { .tfm = state->arc4 };
 
 	get_new_key_from_sha(state, InterimKey);
 	if (!initial_key) {
-		crypto_cipher_setkey(state->arc4, InterimKey, state->keylen);
+		crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
 		setup_sg(sg_in, InterimKey, state->keylen);
 		setup_sg(sg_out, state->session_key, state->keylen);
-		if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in,
-					  state->keylen) != 0) {
+		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+					     state->keylen) != 0) {
 			printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
 		}
 	} else {
@@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 		state->session_key[1] = 0x26;
 		state->session_key[2] = 0x9e;
 	}
-	crypto_cipher_setkey(state->arc4, state->session_key, state->keylen);
+	crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
 }
 
 /*
@@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 
 	memset(state, 0, sizeof(*state));
 
-	state->arc4 = crypto_alloc_tfm("arc4", 0);
-	if (!state->arc4)
+	state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(state->arc4)) {
+		state->arc4 = NULL;
 		goto out_free;
+	}
 
-	state->sha1 = crypto_alloc_tfm("sha1", 0);
-	if (!state->sha1)
+	state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(state->sha1)) {
+		state->sha1 = NULL;
 		goto out_free;
+	}
 
-	digestsize = crypto_tfm_alg_digestsize(state->sha1);
+	digestsize = crypto_hash_digestsize(state->sha1);
 	if (digestsize < MPPE_MAX_KEY_LEN)
 		goto out_free;
 
@@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 	if (state->sha1_digest)
 		kfree(state->sha1_digest);
 	if (state->sha1)
-		crypto_free_tfm(state->sha1);
+		crypto_free_hash(state->sha1);
 	if (state->arc4)
-		crypto_free_tfm(state->arc4);
+		crypto_free_blkcipher(state->arc4);
 	kfree(state);
 	out:
 	return NULL;
@@ -247,9 +261,9 @@ static void mppe_free(void *arg)
 		if (state->sha1_digest)
 			kfree(state->sha1_digest);
 		if (state->sha1)
-			crypto_free_tfm(state->sha1);
+			crypto_free_hash(state->sha1);
 		if (state->arc4)
-			crypto_free_tfm(state->arc4);
+			crypto_free_blkcipher(state->arc4);
 		kfree(state);
 	}
 }
@@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
 	      int isize, int osize)
 {
 	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	struct blkcipher_desc desc = { .tfm = state->arc4 };
 	int proto;
 	struct scatterlist sg_in[1], sg_out[1];
 
@@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
 	/* Encrypt packet */
 	setup_sg(sg_in, ibuf, isize);
 	setup_sg(sg_out, obuf, osize);
-	if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) {
+	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
 		printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
 		return -1;
 	}
@@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 		int osize)
 {
 	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
+	struct blkcipher_desc desc = { .tfm = state->arc4 };
 	unsigned ccount;
 	int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
 	int sanity = 0;
@@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 	 */
 	setup_sg(sg_in, ibuf, 1);
 	setup_sg(sg_out, obuf, 1);
-	if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) {
+	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
 		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
 		return DECOMP_ERROR;
 	}
@@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
 	/* And finally, decrypt the rest of the packet. */
 	setup_sg(sg_in, ibuf + 1, isize - 1);
 	setup_sg(sg_out, obuf + 1, osize - 1);
-	if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) {
+	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
 		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
 		return DECOMP_ERROR;
 	}
@@ -694,8 +710,8 @@ static struct compressor ppp_mppe = {
 static int __init ppp_mppe_init(void)
 {
 	int answer;
-	if (!(crypto_alg_available("arc4", 0) &&
-	      crypto_alg_available("sha1", 0)))
+	if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+	      crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
 		return -ENODEV;
 
 	sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
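The ppp_mppe hunks above move the driver from the old monolithic crypto_tfm interface to the per-type blkcipher and hash interfaces, where allocation returns an ERR_PTR on failure and each operation takes a descriptor. A condensed sketch of the allocation and teardown pattern the driver now follows; it mirrors the calls in the hunks above but is not a drop-in piece of the driver:

/* Condensed illustration of the new-style allocation and teardown. */
#include <linux/crypto.h>
#include <linux/err.h>

static int example_alloc_crypto(struct crypto_blkcipher **arc4,
				struct crypto_hash **sha1)
{
	*arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*arc4))
		return PTR_ERR(*arc4);

	*sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*sha1)) {
		crypto_free_blkcipher(*arc4);
		return PTR_ERR(*sha1);
	}
	return 0;
}

static void example_free_crypto(struct crypto_blkcipher *arc4,
				struct crypto_hash *sha1)
{
	crypto_free_hash(sha1);
	crypto_free_blkcipher(arc4);
}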
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4c2f575faad7..d9b960aa9b0d 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2169,7 +2169,7 @@ static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
 		if (mss)
 			return LargeSend | ((mss & MSSMask) << MSSShift);
 	}
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const struct iphdr *ip = skb->nh.iph;
 
 		if (ip->protocol == IPPROTO_TCP)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e72e0e099060..5b3713f622d7 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3893,7 +3893,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
 	}
 #endif
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		txdp->Control_2 |=
 		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
 		     TXD_TX_CKO_UDP_EN);
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index ee62845d3ac9..eb3b35180c2f 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -1559,7 +1559,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
 	pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
 	pTxd->pMBuf = pMessage;
 
-	if (pMessage->ip_summed == CHECKSUM_HW) {
+	if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
 		u16 hdrlen = pMessage->h.raw - pMessage->data;
 		u16 offset = hdrlen + pMessage->csum;
 
@@ -1678,7 +1678,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
 	/*
 	** Does the HW need to evaluate checksum for TCP or UDP packets?
 	*/
-	if (pMessage->ip_summed == CHECKSUM_HW) {
+	if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
 		u16 hdrlen = pMessage->h.raw - pMessage->data;
 		u16 offset = hdrlen + pMessage->csum;
 
@@ -2158,7 +2158,7 @@ rx_start:
 
 #ifdef USE_SK_RX_CHECKSUM
 		pMsg->csum = pRxd->TcpSums & 0xffff;
-		pMsg->ip_summed = CHECKSUM_HW;
+		pMsg->ip_summed = CHECKSUM_COMPLETE;
 #else
 		pMsg->ip_summed = CHECKSUM_NONE;
 #endif
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index ad878dfddef4..b3d6fa3d6df4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2338,7 +2338,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	td->dma_lo = map;
 	td->dma_hi = map >> 32;
 
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int offset = skb->h.raw - skb->data;
 
 		/* This seems backwards, but it is what the sk98lin
@@ -2642,7 +2642,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 	skb->dev = skge->netdev;
 	if (skge->rx_csum) {
 		skb->csum = csum;
-		skb->ip_summed = CHECKSUM_HW;
+		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 
 	skb->protocol = eth_type_trans(skb, skge->netdev);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 933e87f1cc68..8e92566b587e 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1163,7 +1163,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
 	if (skb_is_gso(skb))
 		++count;
 
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		++count;
 
 	return count;
@@ -1272,7 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 	/* Handle TCP checksum offload */
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		u16 hdr = skb->h.raw - skb->data;
 		u16 offset = hdr + skb->csum;
 
@@ -2000,7 +2000,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 #endif
 		case OP_RXCHKS:
 			skb = sky2->rx_ring[sky2->rx_next].skb;
-			skb->ip_summed = CHECKSUM_HW;
+			skb->ip_summed = CHECKSUM_COMPLETE;
 			skb->csum = le16_to_cpu(status);
 			break;
 
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 88907218457a..d64e718afbd2 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1697,10 +1697,10 @@ spider_net_setup_phy(struct spider_net_card *card)
  */
 static int
 spider_net_download_firmware(struct spider_net_card *card,
-			     u8 *firmware_ptr)
+			     const void *firmware_ptr)
 {
 	int sequencer, i;
-	u32 *fw_ptr = (u32 *)firmware_ptr;
+	const u32 *fw_ptr = firmware_ptr;
 
 	/* stop sequencers */
 	spider_net_write_reg(card, SPIDER_NET_GSINIT,
@@ -1757,7 +1757,7 @@ spider_net_init_firmware(struct spider_net_card *card)
 {
 	struct firmware *firmware = NULL;
 	struct device_node *dn;
-	u8 *fw_prop = NULL;
+	const u8 *fw_prop = NULL;
 	int err = -ENOENT;
 	int fw_size;
 
@@ -1783,7 +1783,7 @@ try_host_fw:
 	if (!dn)
 		goto out_err;
 
-	fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
+	fw_prop = get_property(dn, "firmware", &fw_size);
 	if (!fw_prop)
 		goto out_err;
 
@@ -1986,7 +1986,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
 	struct net_device *netdev = card->netdev;
 	struct device_node *dn;
 	struct sockaddr addr;
-	u8 *mac;
+	const u8 *mac;
 
 	SET_MODULE_OWNER(netdev);
 	SET_NETDEV_DEV(netdev, &card->pdev->dev);
@@ -2019,7 +2019,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
 	if (!dn)
 		return -EIO;
 
-	mac = (u8 *)get_property(dn, "local-mac-address", NULL);
+	mac = get_property(dn, "local-mac-address", NULL);
 	if (!mac)
 		return -EIO;
 	memcpy(addr.sa_data, mac, ETH_ALEN);
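The spider_net, mace and sungem hunks in this series follow the powerpc device-tree change that has get_property() hand back const data: the (u8 *) casts go away and the local pointers become const-qualified. A small sketch of the resulting pattern, mirroring the property read shown above rather than adding new driver code:

/* Illustrative only -- mirrors the pattern above. */
const u8 *mac;

/* get_property() returns const data; no cast is needed any more. */
mac = get_property(dn, "local-mac-address", NULL);
if (mac)
	memcpy(addr.sa_data, mac, ETH_ALEN);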
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index c0a62b00ffc8..2607aa51d8e0 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1230,7 +1230,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 
 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
 			return NETDEV_TX_OK;
 	}
@@ -1252,7 +1252,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 			status |= TxDescIntr;
 			np->reap_tx = 0;
 		}
-		if (skb->ip_summed == CHECKSUM_HW) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			status |= TxCalTCP;
 			np->stats.tx_compressed++;
 		}
@@ -1499,7 +1499,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
 			 * Until then, the printk stays. :-) -Ion
 			 */
 			else if (le16_to_cpu(desc->status2) & 0x0040) {
-				skb->ip_summed = CHECKSUM_HW;
+				skb->ip_summed = CHECKSUM_COMPLETE;
 				skb->csum = le16_to_cpu(desc->csum);
 				printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
 			}
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b70bbd748978..b388651b7836 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -855,7 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
 	}
 
 	skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-	skb->ip_summed = CHECKSUM_HW;
+	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->protocol = eth_type_trans(skb, gp->dev);
 
 	netif_receive_skb(skb);
@@ -1026,7 +1026,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned long flags;
 
 	ctrl = 0;
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		u64 csum_start_off, csum_stuff_off;
 
 		csum_start_off = (u64) (skb->h.raw - skb->data);
@@ -2896,7 +2896,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
 	if (use_idprom)
 		memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
 #elif defined(CONFIG_PPC_PMAC)
-	unsigned char *addr;
+	const unsigned char *addr;
 
 	addr = get_property(gp->of_node, "local-mac-address", NULL);
 	if (addr == NULL) {
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index c6f5bc3c042f..17981da22730 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1207,7 +1207,7 @@ static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tr
  * flags, thus:
  *
  * 	skb->csum = rxd->rx_flags & 0xffff;
- * 	skb->ip_summed = CHECKSUM_HW;
+ * 	skb->ip_summed = CHECKSUM_COMPLETE;
  *
  * before sending off the skb to the protocols, and we are good as gold.
  */
@@ -2074,7 +2074,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 
 			/* This card is _fucking_ hot... */
 			skb->csum = ntohs(csum ^ 0xffff);
-			skb->ip_summed = CHECKSUM_HW;
+			skb->ip_summed = CHECKSUM_COMPLETE;
 
 			RXD(("len=%d csum=%4x]", len, csum));
 			skb->protocol = eth_type_trans(skb, dev);
@@ -2268,7 +2268,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 tx_flags;
 
 	tx_flags = TXFLAG_OWN;
-	if (skb->ip_summed == CHECKSUM_HW) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		u32 csum_start_off, csum_stuff_off;
 
 		csum_start_off = (u32) (skb->h.raw - skb->data);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eafabb253f08..fb7026153861 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -149,122 +149,67 @@ module_param(tg3_debug, int, 0);
 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
 
 static struct pci_device_id tg3_pci_tbl[] = {
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-	{ 0, }
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+	{}
 };
 
 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
 
-static struct {
+static const struct {
 	const char string[ETH_GSTRING_LEN];
 } ethtool_stats_keys[TG3_NUM_STATS] = {
 	{ "rx_octets" },
@@ -345,7 +290,7 @@ static struct {
 	{ "nic_tx_threshold_hit" }
 };
 
-static struct {
+static const struct {
 	const char string[ETH_GSTRING_LEN];
 } ethtool_test_keys[TG3_NUM_TEST] = {
 	{ "nvram test     (online) " },
@@ -3851,11 +3796,11 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb->h.th->check = 0;
 
 		}
-		else if (skb->ip_summed == CHECKSUM_HW)
+		else if (skb->ip_summed == CHECKSUM_PARTIAL)
 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
 #else
 		mss = 0;
-		if (skb->ip_summed == CHECKSUM_HW)
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
 #endif
 #if TG3_VLAN_TAG_USED
@@ -3981,7 +3926,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
 
 	entry = tp->tx_prod;
 	base_flags = 0;
-	if (skb->ip_summed == CHECKSUM_HW)
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
 #if TG3_TSO_SUPPORT != 0
 	mss = 0;
@@ -4969,7 +4914,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
 #define TG3_FW_BSS_ADDR		0x08000a70
 #define TG3_FW_BSS_LEN		0x10
 
-static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
+static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
 	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
 	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
 	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
@@ -5063,7 +5008,7 @@ static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
 	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
 };
 
-static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
+static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
 	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
 	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
 	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
@@ -5128,13 +5073,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 struct fw_info {
 	unsigned int text_base;
 	unsigned int text_len;
-	u32 *text_data;
+	const u32 *text_data;
 	unsigned int rodata_base;
 	unsigned int rodata_len;
-	u32 *rodata_data;
+	const u32 *rodata_data;
 	unsigned int data_base;
 	unsigned int data_len;
-	u32 *data_data;
+	const u32 *data_data;
 };
 
 /* tp->lock is held. */
@@ -5266,7 +5211,7 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 #define TG3_TSO_FW_BSS_ADDR		0x08001b80
 #define TG3_TSO_FW_BSS_LEN		0x894
 
-static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
+static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
 	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
 	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
 	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
@@ -5553,7 +5498,7 @@ static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
 	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
 };
 
-static u32 tg3TsoFwRodata[] = {
+static const u32 tg3TsoFwRodata[] = {
 	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
 	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
 	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
@@ -5561,7 +5506,7 @@ static u32 tg3TsoFwRodata[] = {
 	0x00000000,
 };
 
-static u32 tg3TsoFwData[] = {
+static const u32 tg3TsoFwData[] = {
 	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
 	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
 	0x00000000,
@@ -5583,7 +5528,7 @@ static u32 tg3TsoFwData[] = {
5583#define TG3_TSO5_FW_BSS_ADDR 0x00010f50 5528#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5584#define TG3_TSO5_FW_BSS_LEN 0x88 5529#define TG3_TSO5_FW_BSS_LEN 0x88
5585 5530
5586static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = { 5531static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5587 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000, 5532 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5588 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001, 5533 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5589 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe, 5534 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
@@ -5742,14 +5687,14 @@ static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5742 0x00000000, 0x00000000, 0x00000000, 5687 0x00000000, 0x00000000, 0x00000000,
5743}; 5688};
5744 5689
5745static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = { 5690static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5746 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000, 5691 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5747 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 5692 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5748 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 5693 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5749 0x00000000, 0x00000000, 0x00000000, 5694 0x00000000, 0x00000000, 0x00000000,
5750}; 5695};
5751 5696
5752static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = { 5697static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5753 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000, 5698 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5754 0x00000000, 0x00000000, 0x00000000, 5699 0x00000000, 0x00000000, 0x00000000,
5755}; 5700};
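
The tg3 hunks above constify the embedded firmware images and, in turn, the text/rodata/data pointers in struct fw_info. A minimal sketch of that pattern, using illustrative names rather than the driver's own:

#include <linux/types.h>

/* Sketch: a const firmware blob referenced through a descriptor whose
 * data pointer is also const-qualified, so the image can live in
 * read-only memory and accidental writes fail to compile. */
static const u32 example_fw_text[] = { 0x0e000003, 0x00000000 };

struct example_fw_info {
        unsigned int text_base;
        unsigned int text_len;
        const u32 *text_data;   /* must be const to point at example_fw_text */
};

static const struct example_fw_info example_fw = {
        .text_base = 0x08000000,
        .text_len  = sizeof(example_fw_text),
        .text_data = example_fw_text,
};
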
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 4103c37172f9..c6e601dc6bbc 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -830,7 +830,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
830 first_txd->addrHi = (u64)((unsigned long) skb) >> 32; 830 first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
831 first_txd->processFlags = 0; 831 first_txd->processFlags = 0;
832 832
833 if(skb->ip_summed == CHECKSUM_HW) { 833 if(skb->ip_summed == CHECKSUM_PARTIAL) {
834 /* The 3XP will figure out if this is UDP/TCP */ 834 /* The 3XP will figure out if this is UDP/TCP */
835 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM; 835 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
836 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM; 836 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index ae971080e2e4..66547159bfd9 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1230,7 +1230,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1230 rp->tx_skbuff[entry] = skb; 1230 rp->tx_skbuff[entry] = skb;
1231 1231
1232 if ((rp->quirks & rqRhineI) && 1232 if ((rp->quirks & rqRhineI) &&
1233 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) { 1233 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1234 /* Must use alignment buffer. */ 1234 /* Must use alignment buffer. */
1235 if (skb->len > PKT_BUF_SZ) { 1235 if (skb->len > PKT_BUF_SZ) {
1236 /* packet too long, drop it */ 1236 /* packet too long, drop it */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index aa9cd92f46b2..f1e0c746a388 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2002,7 +2002,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2002 * Handle hardware checksum 2002 * Handle hardware checksum
2003 */ 2003 */
2004 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) 2004 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2005 && (skb->ip_summed == CHECKSUM_HW)) { 2005 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2006 struct iphdr *ip = skb->nh.iph; 2006 struct iphdr *ip = skb->nh.iph;
2007 if (ip->protocol == IPPROTO_TCP) 2007 if (ip->protocol == IPPROTO_TCP)
2008 td_ptr->tdesc1.TCR |= TCR0_TCPCK; 2008 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
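
The typhoon, via-rhine and via-velocity hunks above are part of the tree-wide rename of CHECKSUM_HW to CHECKSUM_PARTIAL for skbs whose checksum the driver must finish in hardware. A hedged sketch of the usual transmit-side test; the descriptor flag and function name are hypothetical:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define EXAMPLE_TX_HW_CSUM 0x01 /* hypothetical TX descriptor bit */

/* Sketch: request hardware checksumming only for partially checksummed skbs. */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        u32 flags = 0;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                flags |= EXAMPLE_TX_HW_CSUM;

        /* ... write 'flags' into the TX descriptor and ring the doorbell ... */
        return 0;       /* NETDEV_TX_OK */
}
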
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4dd13942714..170c500169da 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -19,6 +19,7 @@
19 19
20======================================================================*/ 20======================================================================*/
21 21
22#include <linux/err.h>
22#include <linux/init.h> 23#include <linux/init.h>
23 24
24#include <linux/kernel.h> 25#include <linux/kernel.h>
@@ -1203,7 +1204,7 @@ struct airo_info {
1203 struct iw_spy_data spy_data; 1204 struct iw_spy_data spy_data;
1204 struct iw_public_data wireless_data; 1205 struct iw_public_data wireless_data;
1205 /* MIC stuff */ 1206 /* MIC stuff */
1206 struct crypto_tfm *tfm; 1207 struct crypto_cipher *tfm;
1207 mic_module mod[2]; 1208 mic_module mod[2];
1208 mic_statistics micstats; 1209 mic_statistics micstats;
1209 HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors 1210 HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
@@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
1271 1272
1272static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); 1273static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
1273static void MoveWindow(miccntx *context, u32 micSeq); 1274static void MoveWindow(miccntx *context, u32 micSeq);
1274static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *); 1275static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
1276 struct crypto_cipher *tfm);
1275static void emmh32_init(emmh32_context *context); 1277static void emmh32_init(emmh32_context *context);
1276static void emmh32_update(emmh32_context *context, u8 *pOctets, int len); 1278static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
1277static void emmh32_final(emmh32_context *context, u8 digest[4]); 1279static void emmh32_final(emmh32_context *context, u8 digest[4]);
@@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) {
1339 int i; 1341 int i;
1340 1342
1341 if (ai->tfm == NULL) 1343 if (ai->tfm == NULL)
1342 ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); 1344 ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
1343 1345
1344 if (ai->tfm == NULL) { 1346 if (IS_ERR(ai->tfm)) {
1345 airo_print_err(ai->dev->name, "failed to load transform for AES"); 1347 airo_print_err(ai->dev->name, "failed to load transform for AES");
1348 ai->tfm = NULL;
1346 return ERROR; 1349 return ERROR;
1347 } 1350 }
1348 1351
@@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq)
1608static unsigned char aes_counter[16]; 1611static unsigned char aes_counter[16];
1609 1612
1610/* expand the key to fill the MMH coefficient array */ 1613/* expand the key to fill the MMH coefficient array */
1611static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm) 1614static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
1615 struct crypto_cipher *tfm)
1612{ 1616{
1613 /* take the keying material, expand if necessary, truncate at 16-bytes */ 1617 /* take the keying material, expand if necessary, truncate at 16-bytes */
1614 /* run through AES counter mode to generate context->coeff[] */ 1618 /* run through AES counter mode to generate context->coeff[] */
@@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
1616 int i,j; 1620 int i,j;
1617 u32 counter; 1621 u32 counter;
1618 u8 *cipher, plain[16]; 1622 u8 *cipher, plain[16];
1619 struct scatterlist sg[1];
1620 1623
1621 crypto_cipher_setkey(tfm, pkey, 16); 1624 crypto_cipher_setkey(tfm, pkey, 16);
1622 counter = 0; 1625 counter = 0;
@@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
1627 aes_counter[12] = (u8)(counter >> 24); 1630 aes_counter[12] = (u8)(counter >> 24);
1628 counter++; 1631 counter++;
1629 memcpy (plain, aes_counter, 16); 1632 memcpy (plain, aes_counter, 16);
1630 sg_set_buf(sg, plain, 16); 1633 crypto_cipher_encrypt_one(tfm, plain, plain);
1631 crypto_cipher_encrypt(tfm, sg, sg, 16); 1634 cipher = plain;
1632 cipher = kmap(sg->page) + sg->offset;
1633 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { 1635 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
1634 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); 1636 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
1635 j += 4; 1637 j += 4;
@@ -2432,7 +2434,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
2432 ai->shared, ai->shared_dma); 2434 ai->shared, ai->shared_dma);
2433 } 2435 }
2434 } 2436 }
2435 crypto_free_tfm(ai->tfm); 2437 crypto_free_cipher(ai->tfm);
2436 del_airo_dev( dev ); 2438 del_airo_dev( dev );
2437 free_netdev( dev ); 2439 free_netdev( dev );
2438} 2440}
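
The airo changes follow the crypto layer's switch from the generic crypto_tfm plus scatterlist interface to typed handles: crypto_alloc_cipher() returns a struct crypto_cipher * or an ERR_PTR (hence the IS_ERR() check), and crypto_cipher_encrypt_one() processes a single block with no scatterlist or kmap. A minimal sketch of the new calling sequence for one 16-byte AES block, with error handling abbreviated:

#include <linux/err.h>
#include <linux/crypto.h>

static int example_aes_encrypt_block(const u8 *key, u8 block[16])
{
        struct crypto_cipher *tfm;

        tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        crypto_cipher_setkey(tfm, key, 16);
        crypto_cipher_encrypt_one(tfm, block, block);   /* dst, src */

        crypto_free_cipher(tfm);
        return 0;
}
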
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 076bd6dcafae..7288a3eccfb3 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -176,16 +176,16 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
176 return 0; 176 return 0;
177} 177}
178 178
179static int get_children_props(struct device_node *dn, int **drc_indexes, 179static int get_children_props(struct device_node *dn, const int **drc_indexes,
180 int **drc_names, int **drc_types, int **drc_power_domains) 180 const int **drc_names, const int **drc_types,
181 const int **drc_power_domains)
181{ 182{
182 int *indexes, *names; 183 const int *indexes, *names, *types, *domains;
183 int *types, *domains;
184 184
185 indexes = (int *) get_property(dn, "ibm,drc-indexes", NULL); 185 indexes = get_property(dn, "ibm,drc-indexes", NULL);
186 names = (int *) get_property(dn, "ibm,drc-names", NULL); 186 names = get_property(dn, "ibm,drc-names", NULL);
187 types = (int *) get_property(dn, "ibm,drc-types", NULL); 187 types = get_property(dn, "ibm,drc-types", NULL);
188 domains = (int *) get_property(dn, "ibm,drc-power-domains", NULL); 188 domains = get_property(dn, "ibm,drc-power-domains", NULL);
189 189
190 if (!indexes || !names || !types || !domains) { 190 if (!indexes || !names || !types || !domains) {
191 /* Slot does not have dynamically-removable children */ 191 /* Slot does not have dynamically-removable children */
@@ -212,13 +212,13 @@ static int get_children_props(struct device_node *dn, int **drc_indexes,
212int rpaphp_get_drc_props(struct device_node *dn, int *drc_index, 212int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
213 char **drc_name, char **drc_type, int *drc_power_domain) 213 char **drc_name, char **drc_type, int *drc_power_domain)
214{ 214{
215 int *indexes, *names; 215 const int *indexes, *names;
216 int *types, *domains; 216 const int *types, *domains;
217 unsigned int *my_index; 217 const unsigned int *my_index;
218 char *name_tmp, *type_tmp; 218 char *name_tmp, *type_tmp;
219 int i, rc; 219 int i, rc;
220 220
221 my_index = (int *) get_property(dn, "ibm,my-drc-index", NULL); 221 my_index = get_property(dn, "ibm,my-drc-index", NULL);
222 if (!my_index) { 222 if (!my_index) {
223 /* Node isn't DLPAR/hotplug capable */ 223 /* Node isn't DLPAR/hotplug capable */
224 return -EINVAL; 224 return -EINVAL;
@@ -265,10 +265,10 @@ static int is_php_type(char *drc_type)
265 return 1; 265 return 1;
266} 266}
267 267
268static int is_php_dn(struct device_node *dn, int **indexes, int **names, 268static int is_php_dn(struct device_node *dn, const int **indexes,
269 int **types, int **power_domains) 269 const int **names, const int **types, const int **power_domains)
270{ 270{
271 int *drc_types; 271 const int *drc_types;
272 int rc; 272 int rc;
273 273
274 rc = get_children_props(dn, indexes, names, &drc_types, power_domains); 274 rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
@@ -296,7 +296,7 @@ int rpaphp_add_slot(struct device_node *dn)
296 struct slot *slot; 296 struct slot *slot;
297 int retval = 0; 297 int retval = 0;
298 int i; 298 int i;
299 int *indexes, *names, *types, *power_domains; 299 const int *indexes, *names, *types, *power_domains;
300 char *name, *type; 300 char *name, *type;
301 301
302 dbg("Entry %s: dn->full_name=%s\n", __FUNCTION__, dn->full_name); 302 dbg("Entry %s: dn->full_name=%s\n", __FUNCTION__, dn->full_name);
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index 4d36208ff8de..ae89b9b88743 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -213,17 +213,35 @@ config MONREADER
213 help 213 help
214 Character device driver for reading z/VM monitor service records 214 Character device driver for reading z/VM monitor service records
215 215
216config MONWRITER
217 tristate "API for writing z/VM monitor service records"
218 default "m"
219 help
220 Character device driver for writing z/VM monitor service records
221
216endmenu 222endmenu
217 223
218menu "Cryptographic devices" 224menu "Cryptographic devices"
219 225
220config Z90CRYPT 226config ZCRYPT
221 tristate "Support for PCI-attached cryptographic adapters" 227 tristate "Support for PCI-attached cryptographic adapters"
222 default "m" 228 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
223 help 229 default "m"
230 help
224 Select this option if you want to use a PCI-attached cryptographic 231 Select this option if you want to use a PCI-attached cryptographic
225 adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI 232 adapter like:
226 Cryptographic Coprocessor (PCICC). This option is also available 233 + PCI Cryptographic Accelerator (PCICA)
227 as a module called z90crypt.ko. 234 + PCI Cryptographic Coprocessor (PCICC)
235 + PCI-X Cryptographic Coprocessor (PCIXCC)
236 + Crypto Express2 Coprocessor (CEX2C)
237 + Crypto Express2 Accelerator (CEX2A)
238
239config ZCRYPT_MONOLITHIC
240 bool "Monolithic zcrypt module"
241 depends on ZCRYPT="m"
242 help
243 Select this option if you want to have a single module z90crypt.ko
244 that contains all parts of the crypto device driver (ap bus,
245 request router and all the card drivers).
228 246
229endmenu 247endmenu
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 25c1ef6dfd44..d0647d116eaa 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -184,7 +184,7 @@ dasd_state_known_to_basic(struct dasd_device * device)
184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, 184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
185 8 * sizeof (long)); 185 8 * sizeof (long));
186 debug_register_view(device->debug_area, &debug_sprintf_view); 186 debug_register_view(device->debug_area, &debug_sprintf_view);
187 debug_set_level(device->debug_area, DBF_EMERG); 187 debug_set_level(device->debug_area, DBF_WARNING);
188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
189 189
190 device->state = DASD_STATE_BASIC; 190 device->state = DASD_STATE_BASIC;
@@ -893,7 +893,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
893 893
894 device = (struct dasd_device *) cqr->device; 894 device = (struct dasd_device *) cqr->device;
895 if (device == NULL || 895 if (device == NULL ||
896 device != dasd_device_from_cdev(cdev) || 896 device != dasd_device_from_cdev_locked(cdev) ||
897 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 897 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
898 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 898 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
899 cdev->dev.bus_id); 899 cdev->dev.bus_id);
@@ -970,7 +970,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
970 /* first of all check for state change pending interrupt */ 970 /* first of all check for state change pending interrupt */
971 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 971 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
972 if ((irb->scsw.dstat & mask) == mask) { 972 if ((irb->scsw.dstat & mask) == mask) {
973 device = dasd_device_from_cdev(cdev); 973 device = dasd_device_from_cdev_locked(cdev);
974 if (!IS_ERR(device)) { 974 if (!IS_ERR(device)) {
975 dasd_handle_state_change_pending(device); 975 dasd_handle_state_change_pending(device);
976 dasd_put_device(device); 976 dasd_put_device(device);
@@ -2169,7 +2169,7 @@ dasd_init(void)
2169 goto failed; 2169 goto failed;
2170 } 2170 }
2171 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2171 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2172 debug_set_level(dasd_debug_area, DBF_EMERG); 2172 debug_set_level(dasd_debug_area, DBF_WARNING);
2173 2173
2174 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 2174 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2175 2175
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9af02c79ce8a..91cf971f0652 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -258,8 +258,12 @@ dasd_parse_keyword( char *parsestring ) {
258 return residual_str; 258 return residual_str;
259 } 259 }
260 if (strncmp("nopav", parsestring, length) == 0) { 260 if (strncmp("nopav", parsestring, length) == 0) {
261 dasd_nopav = 1; 261 if (MACHINE_IS_VM)
262 MESSAGE(KERN_INFO, "%s", "disable PAV mode"); 262 MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM");
263 else {
264 dasd_nopav = 1;
265 MESSAGE(KERN_INFO, "%s", "disable PAV mode");
266 }
263 return residual_str; 267 return residual_str;
264 } 268 }
265 if (strncmp("fixedbuffers", parsestring, length) == 0) { 269 if (strncmp("fixedbuffers", parsestring, length) == 0) {
@@ -523,17 +527,17 @@ dasd_create_device(struct ccw_device *cdev)
523{ 527{
524 struct dasd_devmap *devmap; 528 struct dasd_devmap *devmap;
525 struct dasd_device *device; 529 struct dasd_device *device;
530 unsigned long flags;
526 int rc; 531 int rc;
527 532
528 devmap = dasd_devmap_from_cdev(cdev); 533 devmap = dasd_devmap_from_cdev(cdev);
529 if (IS_ERR(devmap)) 534 if (IS_ERR(devmap))
530 return (void *) devmap; 535 return (void *) devmap;
531 cdev->dev.driver_data = devmap;
532 536
533 device = dasd_alloc_device(); 537 device = dasd_alloc_device();
534 if (IS_ERR(device)) 538 if (IS_ERR(device))
535 return device; 539 return device;
536 atomic_set(&device->ref_count, 2); 540 atomic_set(&device->ref_count, 3);
537 541
538 spin_lock(&dasd_devmap_lock); 542 spin_lock(&dasd_devmap_lock);
539 if (!devmap->device) { 543 if (!devmap->device) {
@@ -552,6 +556,11 @@ dasd_create_device(struct ccw_device *cdev)
552 dasd_free_device(device); 556 dasd_free_device(device);
553 return ERR_PTR(rc); 557 return ERR_PTR(rc);
554 } 558 }
559
560 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
561 cdev->dev.driver_data = device;
562 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
563
555 return device; 564 return device;
556} 565}
557 566
@@ -569,6 +578,7 @@ dasd_delete_device(struct dasd_device *device)
569{ 578{
570 struct ccw_device *cdev; 579 struct ccw_device *cdev;
571 struct dasd_devmap *devmap; 580 struct dasd_devmap *devmap;
581 unsigned long flags;
572 582
573 /* First remove device pointer from devmap. */ 583 /* First remove device pointer from devmap. */
574 devmap = dasd_find_busid(device->cdev->dev.bus_id); 584 devmap = dasd_find_busid(device->cdev->dev.bus_id);
@@ -582,9 +592,16 @@ dasd_delete_device(struct dasd_device *device)
582 devmap->device = NULL; 592 devmap->device = NULL;
583 spin_unlock(&dasd_devmap_lock); 593 spin_unlock(&dasd_devmap_lock);
584 594
585 /* Drop ref_count by 2, one for the devmap reference and 595 /* Disconnect dasd_device structure from ccw_device structure. */
586 * one for the passed reference. */ 596 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
587 atomic_sub(2, &device->ref_count); 597 device->cdev->dev.driver_data = NULL;
598 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
599
600 /*
601 * Drop ref_count by 3, one for the devmap reference, one for
602 * the cdev reference and one for the passed reference.
603 */
604 atomic_sub(3, &device->ref_count);
588 605
589 /* Wait for reference counter to drop to zero. */ 606 /* Wait for reference counter to drop to zero. */
590 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0); 607 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
@@ -593,9 +610,6 @@ dasd_delete_device(struct dasd_device *device)
593 cdev = device->cdev; 610 cdev = device->cdev;
594 device->cdev = NULL; 611 device->cdev = NULL;
595 612
596 /* Disconnect dasd_devmap structure from ccw_device structure. */
597 cdev->dev.driver_data = NULL;
598
599 /* Put ccw_device structure. */ 613 /* Put ccw_device structure. */
600 put_device(&cdev->dev); 614 put_device(&cdev->dev);
601 615
@@ -615,21 +629,32 @@ dasd_put_device_wake(struct dasd_device *device)
615 629
616/* 630/*
617 * Return dasd_device structure associated with cdev. 631 * Return dasd_device structure associated with cdev.
632 * This function needs to be called with the ccw device
633 * lock held. It can be used from interrupt context.
634 */
635struct dasd_device *
636dasd_device_from_cdev_locked(struct ccw_device *cdev)
637{
638 struct dasd_device *device = cdev->dev.driver_data;
639
640 if (!device)
641 return ERR_PTR(-ENODEV);
642 dasd_get_device(device);
643 return device;
644}
645
646/*
647 * Return dasd_device structure associated with cdev.
618 */ 648 */
619struct dasd_device * 649struct dasd_device *
620dasd_device_from_cdev(struct ccw_device *cdev) 650dasd_device_from_cdev(struct ccw_device *cdev)
621{ 651{
622 struct dasd_devmap *devmap;
623 struct dasd_device *device; 652 struct dasd_device *device;
653 unsigned long flags;
624 654
625 device = ERR_PTR(-ENODEV); 655 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
626 spin_lock(&dasd_devmap_lock); 656 device = dasd_device_from_cdev_locked(cdev);
627 devmap = cdev->dev.driver_data; 657 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
628 if (devmap && devmap->device) {
629 device = devmap->device;
630 dasd_get_device(device);
631 }
632 spin_unlock(&dasd_devmap_lock);
633 return device; 658 return device;
634} 659}
635 660
@@ -730,16 +755,17 @@ static ssize_t
730dasd_discipline_show(struct device *dev, struct device_attribute *attr, 755dasd_discipline_show(struct device *dev, struct device_attribute *attr,
731 char *buf) 756 char *buf)
732{ 757{
733 struct dasd_devmap *devmap; 758 struct dasd_device *device;
734 char *dname; 759 ssize_t len;
735 760
736 spin_lock(&dasd_devmap_lock); 761 device = dasd_device_from_cdev(to_ccwdev(dev));
737 dname = "none"; 762 if (!IS_ERR(device) && device->discipline) {
738 devmap = dev->driver_data; 763 len = snprintf(buf, PAGE_SIZE, "%s\n",
739 if (devmap && devmap->device && devmap->device->discipline) 764 device->discipline->name);
740 dname = devmap->device->discipline->name; 765 dasd_put_device(device);
741 spin_unlock(&dasd_devmap_lock); 766 } else
742 return snprintf(buf, PAGE_SIZE, "%s\n", dname); 767 len = snprintf(buf, PAGE_SIZE, "none\n");
768 return len;
743} 769}
744 770
745static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); 771static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
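
The devmap rework stores the dasd_device in cdev->dev.driver_data and splits the lookup into dasd_device_from_cdev_locked(), for callers that already hold the ccw device lock (such as the interrupt handler), and dasd_device_from_cdev(), which takes the lock itself. A hedged sketch of a dasd-internal caller in interrupt context; the handler body is illustrative:

/* Sketch: in the interrupt handler the ccw device lock is already held,
 * so the _locked lookup is used; process-context callers use
 * dasd_device_from_cdev() instead.  Both return a referenced device
 * that must be released with dasd_put_device(). */
static void example_int_handler(struct ccw_device *cdev, unsigned long intparm,
                                struct irb *irb)
{
        struct dasd_device *device;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return;
        /* ... process the interrupt for 'device' ... */
        dasd_put_device(device);
}
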
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index da65f1b032f5..e0bf30ebb215 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -678,7 +678,7 @@ int __init dasd_eer_init(void)
678 return 0; 678 return 0;
679} 679}
680 680
681void __exit dasd_eer_exit(void) 681void dasd_eer_exit(void)
682{ 682{
683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0); 683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
684} 684}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 3ccf06d28ba1..9f52004f6fc2 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -534,6 +534,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
534void dasd_remove_sysfs_files(struct ccw_device *); 534void dasd_remove_sysfs_files(struct ccw_device *);
535 535
536struct dasd_device *dasd_device_from_cdev(struct ccw_device *); 536struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
537struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
537struct dasd_device *dasd_device_from_devindex(int); 538struct dasd_device *dasd_device_from_devindex(int);
538 539
539int dasd_parse(void); 540int dasd_parse(void);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index ca7d51f7eccc..cab2c736683a 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -453,7 +453,7 @@ static int __init xpram_init(void)
453 PRINT_WARN("No expanded memory available\n"); 453 PRINT_WARN("No expanded memory available\n");
454 return -ENODEV; 454 return -ENODEV;
455 } 455 }
456 xpram_pages = xpram_highest_page_index(); 456 xpram_pages = xpram_highest_page_index() + 1;
457 PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", 457 PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
458 xpram_pages, (unsigned long) xpram_pages*4); 458 xpram_pages, (unsigned long) xpram_pages*4);
459 rc = xpram_setup_sizes(xpram_pages); 459 rc = xpram_setup_sizes(xpram_pages);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0c0162ff6c0c..c3e97b4fc186 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
28obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o 28obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o 29obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
30obj-$(CONFIG_MONREADER) += monreader.o 30obj-$(CONFIG_MONREADER) += monreader.o
31obj-$(CONFIG_MONWRITER) += monwriter.o
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
new file mode 100644
index 000000000000..1e3939aeb8ab
--- /dev/null
+++ b/drivers/s390/char/monwriter.c
@@ -0,0 +1,292 @@
1/*
2 * drivers/s390/char/monwriter.c
3 *
4 * Character device driver for writing z/VM *MONITOR service records.
5 *
6 * Copyright (C) IBM Corp. 2006
7 *
8 * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
9 */
10
11#include <linux/module.h>
12#include <linux/moduleparam.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/miscdevice.h>
18#include <linux/ctype.h>
19#include <linux/poll.h>
20#include <asm/uaccess.h>
21#include <asm/ebcdic.h>
22#include <asm/io.h>
23#include <asm/appldata.h>
24#include <asm/monwriter.h>
25
26#define MONWRITE_MAX_DATALEN 4024
27
28static int mon_max_bufs = 255;
29
30struct mon_buf {
31 struct list_head list;
32 struct monwrite_hdr hdr;
33 int diag_done;
34 char *data;
35};
36
37struct mon_private {
38 struct list_head list;
39 struct monwrite_hdr hdr;
40 size_t hdr_to_read;
41 size_t data_to_read;
42 struct mon_buf *current_buf;
43 int mon_buf_count;
44};
45
46/*
47 * helper functions
48 */
49
50static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
51{
52 struct appldata_product_id id;
53 int rc;
54
55 strcpy(id.prod_nr, "LNXAPPL");
56 id.prod_fn = myhdr->applid;
57 id.record_nr = myhdr->record_num;
58 id.version_nr = myhdr->version;
59 id.release_nr = myhdr->release;
60 id.mod_lvl = myhdr->mod_level;
61 rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
62 if (rc <= 0)
63 return rc;
64 if (rc == 5)
65 return -EPERM;
66 printk("DIAG X'DC' error with return code: %i\n", rc);
67 return -EINVAL;
68}
69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr)
72{
73 struct mon_buf *entry, *next;
74
75 list_for_each_entry_safe(entry, next, &monpriv->list, list)
76 if (entry->hdr.applid == monhdr->applid &&
77 entry->hdr.record_num == monhdr->record_num &&
78 entry->hdr.version == monhdr->version &&
79 entry->hdr.release == monhdr->release &&
80 entry->hdr.mod_level == monhdr->mod_level)
81 return entry;
82 return NULL;
83}
84
85static int monwrite_new_hdr(struct mon_private *monpriv)
86{
87 struct monwrite_hdr *monhdr = &monpriv->hdr;
88 struct mon_buf *monbuf;
89 int rc;
90
91 if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
92 monhdr->mon_function > MONWRITE_START_CONFIG ||
93 monhdr->hdrlen != sizeof(struct monwrite_hdr))
94 return -EINVAL;
95 monbuf = monwrite_find_hdr(monpriv, monhdr);
96 if (monbuf) {
97 if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
98 monhdr->datalen = monbuf->hdr.datalen;
99 rc = monwrite_diag(monhdr, monbuf->data,
100 APPLDATA_STOP_REC);
101 list_del(&monbuf->list);
102 monpriv->mon_buf_count--;
103 kfree(monbuf->data);
104 kfree(monbuf);
105 monbuf = NULL;
106 }
107 } else {
108 if (monpriv->mon_buf_count >= mon_max_bufs)
109 return -ENOSPC;
110 monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
111 if (!monbuf)
112 return -ENOMEM;
113 monbuf->data = kzalloc(monbuf->hdr.datalen,
114 GFP_KERNEL | GFP_DMA);
115 if (!monbuf->data) {
116 kfree(monbuf);
117 return -ENOMEM;
118 }
119 monbuf->hdr = *monhdr;
120 list_add_tail(&monbuf->list, &monpriv->list);
121 monpriv->mon_buf_count++;
122 }
123 monpriv->current_buf = monbuf;
124 return 0;
125}
126
127static int monwrite_new_data(struct mon_private *monpriv)
128{
129 struct monwrite_hdr *monhdr = &monpriv->hdr;
130 struct mon_buf *monbuf = monpriv->current_buf;
131 int rc = 0;
132
133 switch (monhdr->mon_function) {
134 case MONWRITE_START_INTERVAL:
135 if (!monbuf->diag_done) {
136 rc = monwrite_diag(monhdr, monbuf->data,
137 APPLDATA_START_INTERVAL_REC);
138 monbuf->diag_done = 1;
139 }
140 break;
141 case MONWRITE_START_CONFIG:
142 if (!monbuf->diag_done) {
143 rc = monwrite_diag(monhdr, monbuf->data,
144 APPLDATA_START_CONFIG_REC);
145 monbuf->diag_done = 1;
146 }
147 break;
148 case MONWRITE_GEN_EVENT:
149 rc = monwrite_diag(monhdr, monbuf->data,
150 APPLDATA_GEN_EVENT_REC);
151 list_del(&monpriv->current_buf->list);
152 kfree(monpriv->current_buf->data);
153 kfree(monpriv->current_buf);
154 monpriv->current_buf = NULL;
155 break;
156 default:
157 /* monhdr->mon_function is checked in monwrite_new_hdr */
158 BUG();
159 }
160 return rc;
161}
162
163/*
164 * file operations
165 */
166
167static int monwrite_open(struct inode *inode, struct file *filp)
168{
169 struct mon_private *monpriv;
170
171 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
172 if (!monpriv)
173 return -ENOMEM;
174 INIT_LIST_HEAD(&monpriv->list);
175 monpriv->hdr_to_read = sizeof(monpriv->hdr);
176 filp->private_data = monpriv;
177 return nonseekable_open(inode, filp);
178}
179
180static int monwrite_close(struct inode *inode, struct file *filp)
181{
182 struct mon_private *monpriv = filp->private_data;
183 struct mon_buf *entry, *next;
184
185 list_for_each_entry_safe(entry, next, &monpriv->list, list) {
186 if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
187 monwrite_diag(&entry->hdr, entry->data,
188 APPLDATA_STOP_REC);
189 monpriv->mon_buf_count--;
190 list_del(&entry->list);
191 kfree(entry->data);
192 kfree(entry);
193 }
194 kfree(monpriv);
195 return 0;
196}
197
198static ssize_t monwrite_write(struct file *filp, const char __user *data,
199 size_t count, loff_t *ppos)
200{
201 struct mon_private *monpriv = filp->private_data;
202 size_t len, written;
203 void *to;
204 int rc;
205
206 for (written = 0; written < count; ) {
207 if (monpriv->hdr_to_read) {
208 len = min(count - written, monpriv->hdr_to_read);
209 to = (char *) &monpriv->hdr +
210 sizeof(monpriv->hdr) - monpriv->hdr_to_read;
211 if (copy_from_user(to, data + written, len)) {
212 rc = -EFAULT;
213 goto out_error;
214 }
215 monpriv->hdr_to_read -= len;
216 written += len;
217 if (monpriv->hdr_to_read > 0)
218 continue;
219 rc = monwrite_new_hdr(monpriv);
220 if (rc)
221 goto out_error;
222 monpriv->data_to_read = monpriv->current_buf ?
223 monpriv->current_buf->hdr.datalen : 0;
224 }
225
226 if (monpriv->data_to_read) {
227 len = min(count - written, monpriv->data_to_read);
228 to = monpriv->current_buf->data +
229 monpriv->hdr.datalen - monpriv->data_to_read;
230 if (copy_from_user(to, data + written, len)) {
231 rc = -EFAULT;
232 goto out_error;
233 }
234 monpriv->data_to_read -= len;
235 written += len;
236 if (monpriv->data_to_read > 0)
237 continue;
238 rc = monwrite_new_data(monpriv);
239 if (rc)
240 goto out_error;
241 }
242 monpriv->hdr_to_read = sizeof(monpriv->hdr);
243 }
244 return written;
245
246out_error:
247 monpriv->data_to_read = 0;
248 monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
249 return rc;
250}
251
252static struct file_operations monwrite_fops = {
253 .owner = THIS_MODULE,
254 .open = &monwrite_open,
255 .release = &monwrite_close,
256 .write = &monwrite_write,
257};
258
259static struct miscdevice mon_dev = {
260 .name = "monwriter",
261 .fops = &monwrite_fops,
262 .minor = MISC_DYNAMIC_MINOR,
263};
264
265/*
266 * module init/exit
267 */
268
269static int __init mon_init(void)
270{
271 if (MACHINE_IS_VM)
272 return misc_register(&mon_dev);
273 else
274 return -ENODEV;
275}
276
277static void __exit mon_exit(void)
278{
279 WARN_ON(misc_deregister(&mon_dev) != 0);
280}
281
282module_init(mon_init);
283module_exit(mon_exit);
284
285module_param_named(max_bufs, mon_max_bufs, int, 0644);
286MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers"
287 "that can be active at one time");
288
289MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
290MODULE_DESCRIPTION("Character device driver for writing z/VM "
291 "APPLDATA monitor records.");
292MODULE_LICENSE("GPL");
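
The new monwriter driver consumes a stream of records, each a monwrite_hdr immediately followed by hdr.datalen bytes of sample data, delivered in one write() or split across several. A hedged userspace sketch of sending one generic event record; the /dev/monwriter node name and the availability of <asm/monwriter.h> to userspace are assumptions, and the application id is made up:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <asm/monwriter.h>      /* struct monwrite_hdr, MONWRITE_* (assumed) */

/* Sketch: one MONWRITE_GEN_EVENT record = header followed by payload. */
int send_monitor_event(const void *data, unsigned short len)
{
        struct monwrite_hdr hdr;
        int fd, rc = 0;

        memset(&hdr, 0, sizeof(hdr));
        hdr.hdrlen = sizeof(hdr);
        hdr.mon_function = MONWRITE_GEN_EVENT;
        hdr.applid = 0x42;              /* made-up application id */
        hdr.datalen = len;

        fd = open("/dev/monwriter", O_WRONLY);  /* node name assumed */
        if (fd < 0)
                return -1;
        if (write(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr) ||
            write(fd, data, len) != (ssize_t)len)
                rc = -1;
        close(fd);
        return rc;
}
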
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 19762f3476aa..1678b6c757ec 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2004,2005 IBM Corporation 2 * Copyright (C) 2004,2005 IBM Corporation
3 * Interface implementation for communication with the v/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Author(s): Christian Borntraeger <cborntra@de.ibm.com> 4 * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
5 * 5 *
6 * 6 *
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 87389e730465..8a5975f3dad7 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2004, 2005 IBM Corporation 2 * Copyright (C) 2004, 2005 IBM Corporation
3 * Interface implementation for communication with the v/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Version 1.0 4 * Version 1.0
5 * Author(s): Christian Borntraeger <cborntra@de.ibm.com> 5 * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
6 * 6 *
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c28444af0919..3bb4e472d73d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -256,7 +256,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
256 /* trigger path verification. */ 256 /* trigger path verification. */
257 if (sch->driver && sch->driver->verify) 257 if (sch->driver && sch->driver->verify)
258 sch->driver->verify(&sch->dev); 258 sch->driver->verify(&sch->dev);
259 else if (sch->vpm == mask) 259 else if (sch->lpm == mask)
260 goto out_unreg; 260 goto out_unreg;
261out_unlock: 261out_unlock:
262 spin_unlock_irq(&sch->lock); 262 spin_unlock_irq(&sch->lock);
@@ -378,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
378 378
379 if (chp_mask == 0) { 379 if (chp_mask == 0) {
380 spin_unlock_irq(&sch->lock); 380 spin_unlock_irq(&sch->lock);
381 put_device(&sch->dev);
381 return 0; 382 return 0;
382 } 383 }
383 old_lpm = sch->lpm; 384 old_lpm = sch->lpm;
@@ -392,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
392 393
393 spin_unlock_irq(&sch->lock); 394 spin_unlock_irq(&sch->lock);
394 put_device(&sch->dev); 395 put_device(&sch->dev);
395 return (res_data->fla_mask == 0xffff) ? -ENODEV : 0; 396 return 0;
396} 397}
397 398
398 399
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 89320c1ad825..2e2882daefbb 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -16,11 +16,10 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19
20#include <asm/cio.h> 19#include <asm/cio.h>
21#include <asm/delay.h> 20#include <asm/delay.h>
22#include <asm/irq.h> 21#include <asm/irq.h>
23 22#include <asm/setup.h>
24#include "airq.h" 23#include "airq.h"
25#include "cio.h" 24#include "cio.h"
26#include "css.h" 25#include "css.h"
@@ -192,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
192 sch->orb.pfch = sch->options.prefetch == 0; 191 sch->orb.pfch = sch->options.prefetch == 0;
193 sch->orb.spnd = sch->options.suspend; 192 sch->orb.spnd = sch->options.suspend;
194 sch->orb.ssic = sch->options.suspend && sch->options.inter; 193 sch->orb.ssic = sch->options.suspend && sch->options.inter;
195 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; 194 sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
196#ifdef CONFIG_64BIT 195#ifdef CONFIG_64BIT
197 /* 196 /*
198 * for 64 bit we always support 64 bit IDAWs with 4k page size only 197 * for 64 bit we always support 64 bit IDAWs with 4k page size only
@@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
570 sch->opm = 0xff; 569 sch->opm = 0xff;
571 if (!cio_is_console(sch->schid)) 570 if (!cio_is_console(sch->schid))
572 chsc_validate_chpids(sch); 571 chsc_validate_chpids(sch);
573 sch->lpm = sch->schib.pmcw.pim & 572 sch->lpm = sch->schib.pmcw.pam & sch->opm;
574 sch->schib.pmcw.pam &
575 sch->schib.pmcw.pom &
576 sch->opm;
577 573
578 CIO_DEBUG(KERN_INFO, 0, 574 CIO_DEBUG(KERN_INFO, 0,
579 "Detected device %04x on subchannel 0.%x.%04X" 575 "Detected device %04x on subchannel 0.%x.%04X"
@@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid)
841 return -EBUSY; 837 return -EBUSY;
842} 838}
843 839
844extern void do_reipl(unsigned long devno); 840struct sch_match_id {
845static int 841 struct subchannel_id schid;
846__shutdown_subchannel_easy(struct subchannel_id schid, void *data) 842 struct ccw_dev_id devid;
843 int rc;
844};
845
846static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
847 void *data)
847{ 848{
848 struct schib schib; 849 struct schib schib;
850 struct sch_match_id *match_id = data;
849 851
850 if (stsch_err(schid, &schib)) 852 if (stsch_err(schid, &schib))
851 return -ENXIO; 853 return -ENXIO;
854 if (match_id && schib.pmcw.dnv &&
855 (schib.pmcw.dev == match_id->devid.devno) &&
856 (schid.ssid == match_id->devid.ssid)) {
857 match_id->schid = schid;
858 match_id->rc = 0;
859 }
852 if (!schib.pmcw.ena) 860 if (!schib.pmcw.ena)
853 return 0; 861 return 0;
854 switch(__disable_subchannel_easy(schid, &schib)) { 862 switch(__disable_subchannel_easy(schid, &schib)) {
@@ -864,18 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
864 return 0; 872 return 0;
865} 873}
866 874
867void 875static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
868clear_all_subchannels(void) 876 struct subchannel_id *schid)
869{ 877{
878 struct sch_match_id match_id;
879
880 match_id.devid = *devid;
881 match_id.rc = -ENODEV;
870 local_irq_disable(); 882 local_irq_disable();
871 for_each_subchannel(__shutdown_subchannel_easy, NULL); 883 for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
884 if (match_id.rc == 0)
885 *schid = match_id.schid;
886 return match_id.rc;
872} 887}
873 888
889
890void clear_all_subchannels(void)
891{
892 local_irq_disable();
893 for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
894}
895
896extern void do_reipl_asm(__u32 schid);
897
874/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 898/* Make sure all subchannels are quiet before we re-ipl an lpar. */
875void 899void reipl_ccw_dev(struct ccw_dev_id *devid)
876reipl(unsigned long devno)
877{ 900{
878 clear_all_subchannels(); 901 struct subchannel_id schid;
902
903 if (clear_all_subchannels_and_match(devid, &schid))
904 panic("IPL Device not found\n");
879 cio_reset_channel_paths(); 905 cio_reset_channel_paths();
880 do_reipl(devno); 906 do_reipl_asm(*((__u32*)&schid));
907}
908
909extern struct schib ipl_schib;
910
911/*
912 * ipl_save_parameters gets called very early. It is not allowed to access
913 * anything in the bss section at all. The bss section is not cleared yet,
914 * but may contain some ipl parameters written by the firmware.
915 * These parameters (if present) are copied to 0x2000.
916 * To avoid corruption of the ipl parameters, all variables used by this
917 * function must reside on the stack or in the data section.
918 */
919void ipl_save_parameters(void)
920{
921 struct subchannel_id schid;
922 unsigned int *ipl_ptr;
923 void *src, *dst;
924
925 schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
926 if (!schid.one)
927 return;
928 if (stsch(schid, &ipl_schib))
929 return;
930 if (!ipl_schib.pmcw.dnv)
931 return;
932 ipl_devno = ipl_schib.pmcw.dev;
933 ipl_flags |= IPL_DEVNO_VALID;
934 if (!ipl_schib.pmcw.qf)
935 return;
936 ipl_flags |= IPL_PARMBLOCK_VALID;
937 ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
938 src = (void *)(unsigned long)*ipl_ptr;
939 dst = (void *)IPL_PARMBLOCK_ORIGIN;
940 memmove(dst, src, PAGE_SIZE);
941 *ipl_ptr = IPL_PARMBLOCK_ORIGIN;
881} 942}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 13eeea3d547f..7086a74e9871 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -182,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid)
182 return dev ? to_subchannel(dev) : NULL; 182 return dev ? to_subchannel(dev) : NULL;
183} 183}
184 184
185 185static inline int css_get_subchannel_status(struct subchannel *sch)
186static inline int
187css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
188{ 186{
189 struct schib schib; 187 struct schib schib;
190 int cc;
191 188
192 cc = stsch(schid, &schib); 189 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
193 if (cc)
194 return CIO_GONE;
195 if (!schib.pmcw.dnv)
196 return CIO_GONE; 190 return CIO_GONE;
197 if (sch && sch->schib.pmcw.dnv && 191 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
198 (schib.pmcw.dev != sch->schib.pmcw.dev))
199 return CIO_REVALIDATE; 192 return CIO_REVALIDATE;
200 if (sch && !sch->lpm) 193 if (!sch->lpm)
201 return CIO_NO_PATH; 194 return CIO_NO_PATH;
202 return CIO_OPER; 195 return CIO_OPER;
203} 196}
204 197
205static int 198static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
206css_evaluate_subchannel(struct subchannel_id schid, int slow)
207{ 199{
208 int event, ret, disc; 200 int event, ret, disc;
209 struct subchannel *sch;
210 unsigned long flags; 201 unsigned long flags;
202 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
211 203
212 sch = get_subchannel_by_schid(schid); 204 spin_lock_irqsave(&sch->lock, flags);
213 disc = sch ? device_is_disconnected(sch) : 0; 205 disc = device_is_disconnected(sch);
214 if (disc && slow) { 206 if (disc && slow) {
215 if (sch) 207 /* Disconnected devices are evaluated directly only.*/
216 put_device(&sch->dev); 208 spin_unlock_irqrestore(&sch->lock, flags);
217 return 0; /* Already processed. */ 209 return 0;
218 } 210 }
219 /* 211 /* No interrupt after machine check - kill pending timers. */
220 * We've got a machine check, so running I/O won't get an interrupt. 212 device_kill_pending_timer(sch);
221 * Kill any pending timers.
222 */
223 if (sch)
224 device_kill_pending_timer(sch);
225 if (!disc && !slow) { 213 if (!disc && !slow) {
226 if (sch) 214 /* Non-disconnected devices are evaluated on the slow path. */
227 put_device(&sch->dev); 215 spin_unlock_irqrestore(&sch->lock, flags);
228 return -EAGAIN; /* Will be done on the slow path. */ 216 return -EAGAIN;
229 } 217 }
230 event = css_get_subchannel_status(sch, schid); 218 event = css_get_subchannel_status(sch);
231 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", 219 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
232 schid.ssid, schid.sch_no, event, 220 sch->schid.ssid, sch->schid.sch_no, event,
233 sch?(disc?"disconnected":"normal"):"unknown", 221 disc ? "disconnected" : "normal",
234 slow?"slow":"fast"); 222 slow ? "slow" : "fast");
223 /* Analyze subchannel status. */
224 action = NONE;
235 switch (event) { 225 switch (event) {
236 case CIO_NO_PATH: 226 case CIO_NO_PATH:
237 case CIO_GONE: 227 if (disc) {
238 if (!sch) { 228 /* Check if paths have become available. */
239 /* Never used this subchannel. Ignore. */ 229 action = REPROBE;
240 ret = 0;
241 break; 230 break;
242 } 231 }
243 if (disc && (event == CIO_NO_PATH)) { 232 /* fall through */
244 /* 233 case CIO_GONE:
245 * Uargh, hack again. Because we don't get a machine 234 /* Prevent unwanted effects when opening lock. */
246 * check on configure on, our path bookkeeping can 235 cio_disable_subchannel(sch);
247 * be out of date here (it's fine while we only do 236 device_set_disconnected(sch);
248 * logical varying or get chsc machine checks). We 237 /* Ask driver what to do with device. */
249 * need to force reprobing or we might miss devices 238 action = UNREGISTER;
250 * coming operational again. It won't do harm in real 239 if (sch->driver && sch->driver->notify) {
251 * no path situations.
252 */
253 spin_lock_irqsave(&sch->lock, flags);
254 device_trigger_reprobe(sch);
255 spin_unlock_irqrestore(&sch->lock, flags); 240 spin_unlock_irqrestore(&sch->lock, flags);
256 ret = 0; 241 ret = sch->driver->notify(&sch->dev, event);
257 break; 242 spin_lock_irqsave(&sch->lock, flags);
258 } 243 if (ret)
259 if (sch->driver && sch->driver->notify && 244 action = NONE;
260 sch->driver->notify(&sch->dev, event)) {
261 cio_disable_subchannel(sch);
262 device_set_disconnected(sch);
263 ret = 0;
264 break;
265 } 245 }
266 /*
267 * Unregister subchannel.
268 * The device will be killed automatically.
269 */
270 cio_disable_subchannel(sch);
271 css_sch_device_unregister(sch);
272 /* Reset intparm to zeroes. */
273 sch->schib.pmcw.intparm = 0;
274 cio_modify(sch);
275 put_device(&sch->dev);
276 ret = 0;
277 break; 246 break;
278 case CIO_REVALIDATE: 247 case CIO_REVALIDATE:
279 /* 248 /* Device will be removed, so no notify necessary. */
280 * Revalidation machine check. Sick. 249 if (disc)
281 * We don't notify the driver since we have to throw the device 250 /* Reprobe because immediate unregister might block. */
282 * away in any case. 251 action = REPROBE;
283 */ 252 else
284 if (!disc) { 253 action = UNREGISTER_PROBE;
285 css_sch_device_unregister(sch);
286 /* Reset intparm to zeroes. */
287 sch->schib.pmcw.intparm = 0;
288 cio_modify(sch);
289 put_device(&sch->dev);
290 ret = css_probe_device(schid);
291 } else {
292 /*
293 * We can't immediately deregister the disconnected
294 * device since it might block.
295 */
296 spin_lock_irqsave(&sch->lock, flags);
297 device_trigger_reprobe(sch);
298 spin_unlock_irqrestore(&sch->lock, flags);
299 ret = 0;
300 }
301 break; 254 break;
302 case CIO_OPER: 255 case CIO_OPER:
303 if (disc) { 256 if (disc)
304 spin_lock_irqsave(&sch->lock, flags);
305 /* Get device operational again. */ 257 /* Get device operational again. */
306 device_trigger_reprobe(sch); 258 action = REPROBE;
307 spin_unlock_irqrestore(&sch->lock, flags); 259 break;
308 } 260 }
309 ret = sch ? 0 : css_probe_device(schid); 261 /* Perform action. */
262 ret = 0;
263 switch (action) {
264 case UNREGISTER:
265 case UNREGISTER_PROBE:
266 /* Unregister device (will use subchannel lock). */
267 spin_unlock_irqrestore(&sch->lock, flags);
268 css_sch_device_unregister(sch);
269 spin_lock_irqsave(&sch->lock, flags);
270
271 /* Reset intparm to zeroes. */
272 sch->schib.pmcw.intparm = 0;
273 cio_modify(sch);
274
275 /* Probe if necessary. */
276 if (action == UNREGISTER_PROBE)
277 ret = css_probe_device(sch->schid);
278 break;
279 case REPROBE:
280 device_trigger_reprobe(sch);
310 break; 281 break;
311 default: 282 default:
312 BUG(); 283 break;
313 ret = 0; 284 }
285 spin_unlock_irqrestore(&sch->lock, flags);
286
287 return ret;
288}
289
290static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
291{
292 struct schib schib;
293
294 if (!slow) {
295 /* Will be done on the slow path. */
296 return -EAGAIN;
314 } 297 }
298 if (stsch(schid, &schib) || !schib.pmcw.dnv) {
299 /* Unusable - ignore. */
300 return 0;
301 }
302 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
303 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
304
305 return css_probe_device(schid);
306}
307
308static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
309{
310 struct subchannel *sch;
311 int ret;
312
313 sch = get_subchannel_by_schid(schid);
314 if (sch) {
315 ret = css_evaluate_known_subchannel(sch, slow);
316 put_device(&sch->dev);
317 } else
318 ret = css_evaluate_new_subchannel(schid, slow);
319
315 return ret; 320 return ret;
316} 321}
317 322
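
The css.c rework above replaces the tangled per-event branches with a two-stage shape: classify the machine-check event into a single action while holding the subchannel lock, then perform that action once. A schematic sketch of the same decide-then-act structure; the types, events and helpers here are illustrative, not the cio code:

enum example_event  { EXAMPLE_EVT_GONE, EXAMPLE_EVT_OPER, EXAMPLE_EVT_OTHER };
enum example_action { ACT_NONE, ACT_UNREGISTER, ACT_REPROBE };

struct example_sch;                             /* opaque in this sketch */
void example_unregister(struct example_sch *sch);
void example_reprobe(struct example_sch *sch);

/* Sketch: translate the event into one action, then act in one place so
 * locking and cleanup are not duplicated in every case. */
static int example_evaluate(struct example_sch *sch, enum example_event event)
{
        enum example_action action = ACT_NONE;

        switch (event) {
        case EXAMPLE_EVT_GONE:
                action = ACT_UNREGISTER;
                break;
        case EXAMPLE_EVT_OPER:
                action = ACT_REPROBE;
                break;
        default:
                break;
        }

        switch (action) {
        case ACT_UNREGISTER:
                example_unregister(sch);
                break;
        case ACT_REPROBE:
                example_reprobe(sch);
                break;
        case ACT_NONE:
                break;
        }
        return 0;
}
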
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 646da5640401..688945662c15 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -52,53 +52,81 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
52 return 1; 52 return 1;
53} 53}
54 54
55/* 55/* Store modalias string delimited by prefix/suffix string into buffer with
56 * Hotplugging interface for ccw devices. 56 * specified size. Return length of resulting string (excluding trailing '\0')
57 * Heavily modeled on pci and usb hotplug. 57 * even if string doesn't fit buffer (snprintf semantics). */
58 */ 58static int snprint_alias(char *buf, size_t size, const char *prefix,
59static int 59 struct ccw_device_id *id, const char *suffix)
60ccw_uevent (struct device *dev, char **envp, int num_envp,
61 char *buffer, int buffer_size)
62{ 60{
63 struct ccw_device *cdev = to_ccwdev(dev); 61 int len;
64 int i = 0;
65 int length = 0;
66 62
67 if (!cdev) 63 len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
68 return -ENODEV; 64 id->cu_model);
65 if (len > size)
66 return len;
67 buf += len;
68 size -= len;
69 69
70 /* what we want to pass to /sbin/hotplug */ 70 if (id->dev_type != 0)
71 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
72 id->dev_model, suffix);
73 else
74 len += snprintf(buf, size, "dtdm%s", suffix);
71 75
72 envp[i++] = buffer; 76 return len;
73 length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", 77}
74 cdev->id.cu_type);
75 if ((buffer_size - length <= 0) || (i >= num_envp))
76 return -ENOMEM;
77 ++length;
78 buffer += length;
79 78
79/* Set up environment variables for ccw device uevent. Return 0 on success,
80 * non-zero otherwise. */
81static int ccw_uevent(struct device *dev, char **envp, int num_envp,
82 char *buffer, int buffer_size)
83{
84 struct ccw_device *cdev = to_ccwdev(dev);
85 struct ccw_device_id *id = &(cdev->id);
86 int i = 0;
87 int len;
88
89 /* CU_TYPE= */
90 len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
91 if (len > buffer_size || i >= num_envp)
92 return -ENOMEM;
80 envp[i++] = buffer; 93 envp[i++] = buffer;
81 length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", 94 buffer += len;
82 cdev->id.cu_model); 95 buffer_size -= len;
83 if ((buffer_size - length <= 0) || (i >= num_envp)) 96
97 /* CU_MODEL= */
98 len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
99 if (len > buffer_size || i >= num_envp)
84 return -ENOMEM; 100 return -ENOMEM;
85 ++length; 101 envp[i++] = buffer;
86 buffer += length; 102 buffer += len;
103 buffer_size -= len;
87 104
88 /* The next two can be zero, that's ok for us */ 105 /* The next two can be zero, that's ok for us */
89 envp[i++] = buffer; 106 /* DEV_TYPE= */
90 length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", 107 len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
91 cdev->id.dev_type); 108 if (len > buffer_size || i >= num_envp)
92 if ((buffer_size - length <= 0) || (i >= num_envp))
93 return -ENOMEM; 109 return -ENOMEM;
94 ++length; 110 envp[i++] = buffer;
95 buffer += length; 111 buffer += len;
112 buffer_size -= len;
96 113
114 /* DEV_MODEL= */
115 len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
116 (unsigned char) id->dev_model) + 1;
117 if (len > buffer_size || i >= num_envp)
118 return -ENOMEM;
97 envp[i++] = buffer; 119 envp[i++] = buffer;
98 length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", 120 buffer += len;
99 cdev->id.dev_model); 121 buffer_size -= len;
100 if ((buffer_size - length <= 0) || (i >= num_envp)) 122
123 /* MODALIAS= */
124 len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
125 if (len > buffer_size || i >= num_envp)
101 return -ENOMEM; 126 return -ENOMEM;
127 envp[i++] = buffer;
128 buffer += len;
129 buffer_size -= len;
102 130
103 envp[i] = NULL; 131 envp[i] = NULL;
104 132
@@ -251,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
251{ 279{
252 struct ccw_device *cdev = to_ccwdev(dev); 280 struct ccw_device *cdev = to_ccwdev(dev);
253 struct ccw_device_id *id = &(cdev->id); 281 struct ccw_device_id *id = &(cdev->id);
254 int ret; 282 int len;
255 283
256 ret = sprintf(buf, "ccw:t%04Xm%02X", 284 len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
257 id->cu_type, id->cu_model); 285
258 if (id->dev_type != 0) 286 return len > PAGE_SIZE ? PAGE_SIZE : len;
259 ret += sprintf(buf + ret, "dt%04Xdm%02X\n",
260 id->dev_type, id->dev_model);
261 else
262 ret += sprintf(buf + ret, "dtdm\n");
263 return ret;
264} 287}
265 288
266static ssize_t 289static ssize_t
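
The device.c rework funnels both the MODALIAS uevent variable and the sysfs modalias attribute through snprint_alias(), leaning on snprintf() semantics: the return value is the length the full string would have had, so a value that does not fit the buffer signals truncation. A small userspace-style sketch of the same formatting and overflow check; the IDs are made up:

#include <stdio.h>

/* Sketch: build a ccw modalias ("ccw:tXXXXmXX" plus either "dtXXXXdmXX"
 * or "dtdm") and report the would-be length so the caller can detect a
 * buffer that is too small. */
int build_ccw_alias(char *buf, size_t size, unsigned int cu_type,
                    unsigned int cu_model, unsigned int dev_type,
                    unsigned int dev_model)
{
        int len;

        len = snprintf(buf, size, "ccw:t%04Xm%02X", cu_type, cu_model);
        if ((size_t)len >= size)
                return len;             /* truncated */
        if (dev_type != 0)
                len += snprintf(buf + len, size - len, "dt%04Xdm%02X",
                                dev_type, dev_model);
        else
                len += snprintf(buf + len, size - len, "dtdm");
        return len;
}

With made-up IDs 0x1234/0x01 and 0x5678/0x02 this produces ccw:t1234m01dt5678dm02, matching the format the hunk emits.
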
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 35e162ba6d54..dace46fc32e8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
232 */ 232 */
233 old_lpm = sch->lpm; 233 old_lpm = sch->lpm;
234 stsch(sch->schid, &sch->schib); 234 stsch(sch->schid, &sch->schib);
235 sch->lpm = sch->schib.pmcw.pim & 235 sch->lpm = sch->schib.pmcw.pam & sch->opm;
236 sch->schib.pmcw.pam &
237 sch->schib.pmcw.pom &
238 sch->opm;
239 /* Check since device may again have become not operational. */ 236 /* Check since device may again have become not operational. */
240 if (!sch->schib.pmcw.dnv) 237 if (!sch->schib.pmcw.dnv)
241 state = DEV_STATE_NOT_OPER; 238 state = DEV_STATE_NOT_OPER;
@@ -267,6 +264,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
267 notify = 1; 264 notify = 1;
268 } 265 }
269 /* fill out sense information */ 266 /* fill out sense information */
267 memset(&cdev->id, 0, sizeof(cdev->id));
270 cdev->id.cu_type = cdev->private->senseid.cu_type; 268 cdev->id.cu_type = cdev->private->senseid.cu_type;
271 cdev->id.cu_model = cdev->private->senseid.cu_model; 269 cdev->id.cu_model = cdev->private->senseid.cu_model;
272 cdev->id.dev_type = cdev->private->senseid.dev_type; 270 cdev->id.dev_type = cdev->private->senseid.dev_type;
@@ -454,8 +452,8 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
454 return; 452 return;
455 } 453 }
456 /* Start Path Group verification. */ 454 /* Start Path Group verification. */
457 sch->vpm = 0; /* Start with no path groups set. */
458 cdev->private->state = DEV_STATE_VERIFY; 455 cdev->private->state = DEV_STATE_VERIFY;
456 cdev->private->flags.doverify = 0;
459 ccw_device_verify_start(cdev); 457 ccw_device_verify_start(cdev);
460} 458}
461 459
@@ -555,7 +553,19 @@ ccw_device_nopath_notify(void *data)
555void 553void
556ccw_device_verify_done(struct ccw_device *cdev, int err) 554ccw_device_verify_done(struct ccw_device *cdev, int err)
557{ 555{
558 cdev->private->flags.doverify = 0; 556 struct subchannel *sch;
557
558 sch = to_subchannel(cdev->dev.parent);
559 /* Update schib - pom may have changed. */
560 stsch(sch->schid, &sch->schib);
561 /* Update lpm with verified path mask. */
562 sch->lpm = sch->vpm;
563 /* Repeat path verification? */
564 if (cdev->private->flags.doverify) {
565 cdev->private->flags.doverify = 0;
566 ccw_device_verify_start(cdev);
567 return;
568 }
559 switch (err) { 569 switch (err) {
560 case -EOPNOTSUPP: /* path grouping not supported, just set online. */ 570 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
561 cdev->private->options.pgroup = 0; 571 cdev->private->options.pgroup = 0;
@@ -613,6 +623,7 @@ ccw_device_online(struct ccw_device *cdev)
613 if (!cdev->private->options.pgroup) { 623 if (!cdev->private->options.pgroup) {
614 /* Start initial path verification. */ 624 /* Start initial path verification. */
615 cdev->private->state = DEV_STATE_VERIFY; 625 cdev->private->state = DEV_STATE_VERIFY;
626 cdev->private->flags.doverify = 0;
616 ccw_device_verify_start(cdev); 627 ccw_device_verify_start(cdev);
617 return 0; 628 return 0;
618 } 629 }
@@ -659,7 +670,6 @@ ccw_device_offline(struct ccw_device *cdev)
659 /* Are we doing path grouping? */ 670 /* Are we doing path grouping? */
660 if (!cdev->private->options.pgroup) { 671 if (!cdev->private->options.pgroup) {
661 /* No, set state offline immediately. */ 672 /* No, set state offline immediately. */
662 sch->vpm = 0;
663 ccw_device_done(cdev, DEV_STATE_OFFLINE); 673 ccw_device_done(cdev, DEV_STATE_OFFLINE);
664 return 0; 674 return 0;
665 } 675 }
@@ -780,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
780 } 790 }
781 /* Device is idle, we can do the path verification. */ 791 /* Device is idle, we can do the path verification. */
782 cdev->private->state = DEV_STATE_VERIFY; 792 cdev->private->state = DEV_STATE_VERIFY;
793 cdev->private->flags.doverify = 0;
783 ccw_device_verify_start(cdev); 794 ccw_device_verify_start(cdev);
784} 795}
785 796
@@ -1042,9 +1053,9 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1042} 1053}
1043 1054
1044static void 1055static void
1045ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) 1056ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
1046{ 1057{
1047 /* When the I/O has terminated, we have to start verification. */ 1058 /* Start verification after current task finished. */
1048 cdev->private->flags.doverify = 1; 1059 cdev->private->flags.doverify = 1;
1049} 1060}
1050 1061
@@ -1110,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch)
1110 * The pim, pam, pom values may not be accurate, but they are the best 1121 * The pim, pam, pom values may not be accurate, but they are the best
1111 * we have before performing device selection :/ 1122 * we have before performing device selection :/
1112 */ 1123 */
1113 sch->lpm = sch->schib.pmcw.pim & 1124 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1114 sch->schib.pmcw.pam &
1115 sch->schib.pmcw.pom &
1116 sch->opm;
1117 /* Re-set some bits in the pmcw that were lost. */ 1125 /* Re-set some bits in the pmcw that were lost. */
1118 sch->schib.pmcw.isc = 3; 1126 sch->schib.pmcw.isc = 3;
1119 sch->schib.pmcw.csense = 1; 1127 sch->schib.pmcw.csense = 1;
@@ -1237,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1237 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1245 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1238 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 1246 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
1239 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 1247 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
1240 [DEV_EVENT_VERIFY] = ccw_device_nop, 1248 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1241 }, 1249 },
1242 [DEV_STATE_ONLINE] = { 1250 [DEV_STATE_ONLINE] = {
1243 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1251 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
@@ -1280,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1280 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, 1288 [DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
1281 [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq, 1289 [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
1282 [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout, 1290 [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
1283 [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify, 1291 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1284 }, 1292 },
1285 [DEV_STATE_QUIESCE] = { 1293 [DEV_STATE_QUIESCE] = {
1286 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done, 1294 [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
@@ -1293,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1293 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1301 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1294 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1302 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1295 [DEV_EVENT_TIMEOUT] = ccw_device_bug, 1303 [DEV_EVENT_TIMEOUT] = ccw_device_bug,
1296 [DEV_EVENT_VERIFY] = ccw_device_nop, 1304 [DEV_EVENT_VERIFY] = ccw_device_start_id,
1297 }, 1305 },
1298 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1306 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1299 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 1307 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
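
The device_fsm.c hunks above do two related things: the logical path mask is now derived only from the available and operational masks (pam & opm instead of pim & pam & pom & opm), and verification requests that arrive while the device is busy are remembered in flags.doverify and replayed from ccw_device_verify_done(). A toy illustration of the mask arithmetic with made-up masks (the one-line glosses of pim/pam/pom/opm are approximate):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative 8-bit channel path masks, one bit per path. */
        unsigned char pim = 0xF0;   /* paths installed                 */
        unsigned char pam = 0xD0;   /* paths available                 */
        unsigned char pom = 0xC0;   /* paths operational               */
        unsigned char opm = 0xF0;   /* paths not varied off            */

        unsigned char old_lpm = pim & pam & pom & opm;  /* pre-patch  */
        unsigned char new_lpm = pam & opm;              /* post-patch */

        printf("old lpm = %02X, new lpm = %02X\n", old_lpm, new_lpm);
        return 0;
    }
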
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 9e3de0bd59b5..93a897eebfff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
96 ret = cio_set_options (sch, flags); 96 ret = cio_set_options (sch, flags);
97 if (ret) 97 if (ret)
98 return ret; 98 return ret;
 99	/* Adjust requested path mask to exclude varied-off paths. */

100 if (lpm) {
101 lpm &= sch->opm;
102 if (lpm == 0)
103 return -EACCES;
104 }
99 ret = cio_start_key (sch, cpa, lpm, key); 105 ret = cio_start_key (sch, cpa, lpm, key);
100 if (ret == 0) 106 if (ret == 0)
101 cdev->private->intparm = intparm; 107 cdev->private->intparm = intparm;
@@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
250 if (!sch) 256 if (!sch)
251 return 0; 257 return 0;
252 else 258 else
253 return sch->vpm; 259 return sch->lpm;
254} 260}
255 261
256static void 262static void
@@ -304,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
304 sch = to_subchannel(cdev->dev.parent); 310 sch = to_subchannel(cdev->dev.parent);
305 do { 311 do {
306 ret = cio_start (sch, ccw, lpm); 312 ret = cio_start (sch, ccw, lpm);
307 if ((ret == -EBUSY) || (ret == -EACCES)) { 313 if (ret == -EBUSY) {
308 /* Try again later. */ 314 /* Try again later. */
309 spin_unlock_irq(&sch->lock); 315 spin_unlock_irq(&sch->lock);
310 msleep(10); 316 msleep(10);
@@ -433,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
433 if (!ciw || ciw->cmd == 0) 439 if (!ciw || ciw->cmd == 0)
434 return -EOPNOTSUPP; 440 return -EOPNOTSUPP;
435 441
442	/* Adjust requested path mask to exclude varied-off paths. */
443 if (lpm) {
444 lpm &= sch->opm;
445 if (lpm == 0)
446 return -EACCES;
447 }
448
436 rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); 449 rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
437 if (!rcd_ccw) 450 if (!rcd_ccw)
438 return -ENOMEM; 451 return -ENOMEM;
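
Both device_ops.c hunks add the same guard: the caller-supplied path mask is intersected with the operational path mask, and the request is refused when no path survives. A compact sketch of that check, with an invented helper name:

    #include <stdio.h>
    #include <errno.h>

    /* Sketch of the added guard: restrict a requested path mask (lpm) to the
     * operational path mask (opm) and fail with -EACCES if nothing is left. */
    static int adjust_lpm(unsigned char *lpm, unsigned char opm)
    {
        if (*lpm) {
            *lpm &= opm;
            if (*lpm == 0)
                return -EACCES;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char lpm = 0x08;          /* caller wants this path only   */
        int rc = adjust_lpm(&lpm, 0xC0);   /* ...but it has been varied off */

        printf("rc=%d lpm=%02X\n", rc, lpm);
        return 0;
    }
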
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 1693a102dcfe..8ca2d078848c 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -245,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
245 memset(&cdev->private->irb, 0, sizeof(struct irb)); 245 memset(&cdev->private->irb, 0, sizeof(struct irb));
246 246
247 /* Try multiple times. */ 247 /* Try multiple times. */
248 ret = -ENODEV; 248 ret = -EACCES;
249 if (cdev->private->iretry > 0) { 249 if (cdev->private->iretry > 0) {
250 cdev->private->iretry--; 250 cdev->private->iretry--;
251 ret = cio_start (sch, cdev->private->iccws, 251 ret = cio_start (sch, cdev->private->iccws,
252 cdev->private->imask); 252 cdev->private->imask);
253 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 253 /* We expect an interrupt in case of success or busy
254 if ((ret != -EACCES) && (ret != -ENODEV)) 254 * indication. */
255 if ((ret == 0) || (ret == -EBUSY))
255 return ret; 256 return ret;
256 } 257 }
257 /* PGID command failed on this path. Switch it off. */ 258 /* PGID command failed on this path. */
258 sch->lpm &= ~cdev->private->imask;
259 sch->vpm &= ~cdev->private->imask;
260 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 259 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
261 "0.%x.%04x, lpm %02X, became 'not operational'\n", 260 "0.%x.%04x, lpm %02X, became 'not operational'\n",
262 cdev->private->devno, sch->schid.ssid, 261 cdev->private->devno, sch->schid.ssid,
@@ -286,18 +285,17 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
286 memset(&cdev->private->irb, 0, sizeof(struct irb)); 285 memset(&cdev->private->irb, 0, sizeof(struct irb));
287 286
288 /* Try multiple times. */ 287 /* Try multiple times. */
289 ret = -ENODEV; 288 ret = -EACCES;
290 if (cdev->private->iretry > 0) { 289 if (cdev->private->iretry > 0) {
291 cdev->private->iretry--; 290 cdev->private->iretry--;
292 ret = cio_start (sch, cdev->private->iccws, 291 ret = cio_start (sch, cdev->private->iccws,
293 cdev->private->imask); 292 cdev->private->imask);
294 /* ret is 0, -EBUSY, -EACCES or -ENODEV */ 293 /* We expect an interrupt in case of success or busy
295 if ((ret != -EACCES) && (ret != -ENODEV)) 294 * indication. */
295 if ((ret == 0) || (ret == -EBUSY))
296 return ret; 296 return ret;
297 } 297 }
298 /* nop command failed on this path. Switch it off. */ 298 /* nop command failed on this path. */
299 sch->lpm &= ~cdev->private->imask;
300 sch->vpm &= ~cdev->private->imask;
301 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " 299 CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
302 "0.%x.%04x, lpm %02X, became 'not operational'\n", 300 "0.%x.%04x, lpm %02X, became 'not operational'\n",
303 cdev->private->devno, sch->schid.ssid, 301 cdev->private->devno, sch->schid.ssid,
@@ -372,27 +370,32 @@ static void
372__ccw_device_verify_start(struct ccw_device *cdev) 370__ccw_device_verify_start(struct ccw_device *cdev)
373{ 371{
374 struct subchannel *sch; 372 struct subchannel *sch;
375 __u8 imask, func; 373 __u8 func;
376 int ret; 374 int ret;
377 375
378 sch = to_subchannel(cdev->dev.parent); 376 sch = to_subchannel(cdev->dev.parent);
379 while (sch->vpm != sch->lpm) { 377 /* Repeat for all paths. */
380 /* Find first unequal bit in vpm vs. lpm */ 378 for (; cdev->private->imask; cdev->private->imask >>= 1,
381 for (imask = 0x80; imask != 0; imask >>= 1) 379 cdev->private->iretry = 5) {
382 if ((sch->vpm & imask) != (sch->lpm & imask)) 380 if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
383 break; 381 /* Path not available, try next. */
384 cdev->private->imask = imask; 382 continue;
385 if (cdev->private->options.pgroup) { 383 if (cdev->private->options.pgroup) {
386 func = (sch->vpm & imask) ? 384 if (sch->opm & cdev->private->imask)
387 SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; 385 func = SPID_FUNC_ESTABLISH;
386 else
387 func = SPID_FUNC_RESIGN;
388 ret = __ccw_device_do_pgid(cdev, func); 388 ret = __ccw_device_do_pgid(cdev, func);
389 } else 389 } else
390 ret = __ccw_device_do_nop(cdev); 390 ret = __ccw_device_do_nop(cdev);
391 /* We expect an interrupt in case of success or busy
392 * indication. */
391 if (ret == 0 || ret == -EBUSY) 393 if (ret == 0 || ret == -EBUSY)
392 return; 394 return;
393 cdev->private->iretry = 5; 395 /* Permanent path failure, try next. */
394 } 396 }
395 ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); 397 /* Done with all paths. */
398 ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
396} 399}
397 400
398/* 401/*
@@ -421,14 +424,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
421 else 424 else
422 ret = __ccw_device_check_nop(cdev); 425 ret = __ccw_device_check_nop(cdev);
423 memset(&cdev->private->irb, 0, sizeof(struct irb)); 426 memset(&cdev->private->irb, 0, sizeof(struct irb));
427
424 switch (ret) { 428 switch (ret) {
425 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 429 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
426 case 0: 430 case 0:
427 /* Establish or Resign Path Group done. Update vpm. */ 431 /* Path verification ccw finished successfully, update lpm. */
428 if ((sch->lpm & cdev->private->imask) != 0) 432 sch->vpm |= sch->opm & cdev->private->imask;
429 sch->vpm |= cdev->private->imask; 433 /* Go on with next path. */
430 else 434 cdev->private->imask >>= 1;
431 sch->vpm &= ~cdev->private->imask;
432 cdev->private->iretry = 5; 435 cdev->private->iretry = 5;
433 __ccw_device_verify_start(cdev); 436 __ccw_device_verify_start(cdev);
434 break; 437 break;
@@ -441,6 +444,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
441 cdev->private->options.pgroup = 0; 444 cdev->private->options.pgroup = 0;
442 else 445 else
443 cdev->private->flags.pgid_single = 1; 446 cdev->private->flags.pgid_single = 1;
447 /* Retry */
448 sch->vpm = 0;
449 cdev->private->imask = 0x80;
450 cdev->private->iretry = 5;
444 /* fall through. */ 451 /* fall through. */
445 case -EAGAIN: /* Try again. */ 452 case -EAGAIN: /* Try again. */
446 __ccw_device_verify_start(cdev); 453 __ccw_device_verify_start(cdev);
@@ -449,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
449 ccw_device_verify_done(cdev, -ETIME); 456 ccw_device_verify_done(cdev, -ETIME);
450 break; 457 break;
451 case -EACCES: /* channel is not operational. */ 458 case -EACCES: /* channel is not operational. */
452 sch->lpm &= ~cdev->private->imask; 459 cdev->private->imask >>= 1;
453 sch->vpm &= ~cdev->private->imask;
454 cdev->private->iretry = 5; 460 cdev->private->iretry = 5;
455 __ccw_device_verify_start(cdev); 461 __ccw_device_verify_start(cdev);
456 break; 462 break;
@@ -463,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev)
463 struct subchannel *sch = to_subchannel(cdev->dev.parent); 469 struct subchannel *sch = to_subchannel(cdev->dev.parent);
464 470
465 cdev->private->flags.pgid_single = 0; 471 cdev->private->flags.pgid_single = 0;
472 cdev->private->imask = 0x80;
466 cdev->private->iretry = 5; 473 cdev->private->iretry = 5;
467 /* 474
468 * Update sch->lpm with current values to catch paths becoming 475 /* Start with empty vpm. */
469 * available again. 476 sch->vpm = 0;
470 */ 477
478 /* Get current pam. */
471 if (stsch(sch->schid, &sch->schib)) { 479 if (stsch(sch->schid, &sch->schib)) {
472 ccw_device_verify_done(cdev, -ENODEV); 480 ccw_device_verify_done(cdev, -ENODEV);
473 return; 481 return;
474 } 482 }
475 sch->lpm = sch->schib.pmcw.pim &
476 sch->schib.pmcw.pam &
477 sch->schib.pmcw.pom &
478 sch->opm;
479 __ccw_device_verify_start(cdev); 483 __ccw_device_verify_start(cdev);
480} 484}
481 485
@@ -524,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
524 switch (ret) { 528 switch (ret) {
525 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 529 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
526 case 0: /* disband successful. */ 530 case 0: /* disband successful. */
527 sch->vpm = 0;
528 ccw_device_disband_done(cdev, ret); 531 ccw_device_disband_done(cdev, ret);
529 break; 532 break;
530 case -EOPNOTSUPP: 533 case -EOPNOTSUPP:
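
The reworked __ccw_device_verify_start() walks all eight possible paths with a one-bit mask that starts at 0x80 and shifts right, resetting the per-path retry budget and skipping paths missing from the pam; verified paths are collected in vpm. A self-contained sketch of that iteration, with the actual SPID/NOP I/O replaced by a stub (in the driver the retries are driven by interrupts, not a tight loop):

    #include <stdio.h>

    /* Stub standing in for issuing SPID/NOP on one path; always succeeds. */
    static int verify_one_path(unsigned char imask) { (void)imask; return 0; }

    int main(void)
    {
        unsigned char pam = 0xD0;   /* illustrative available-path mask */
        unsigned char vpm = 0;      /* verified paths accumulate here   */

        for (unsigned char imask = 0x80; imask; imask >>= 1) {
            if (!(pam & imask))
                continue;                       /* path not available       */
            for (int iretry = 5; iretry > 0; iretry--) {
                if (verify_one_path(imask) == 0) {
                    vpm |= imask;               /* record the verified path */
                    break;
                }
            }
        }
        printf("vpm = %02X\n", vpm);
        return 0;
    }
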
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 7c93a8798d23..cde822d8b5c8 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -115,7 +115,7 @@ qdio_min(int a,int b)
115static inline __u64 115static inline __u64
116qdio_get_micros(void) 116qdio_get_micros(void)
117{ 117{
118 return (get_clock() >> 10); /* time>>12 is microseconds */ 118 return (get_clock() >> 12); /* time>>12 is microseconds */
119} 119}
120 120
121/* 121/*
@@ -1129,7 +1129,7 @@ out:
1129 1129
1130#ifdef QDIO_USE_PROCESSING_STATE 1130#ifdef QDIO_USE_PROCESSING_STATE
1131 if (last_position>=0) 1131 if (last_position>=0)
1132 set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); 1132 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1133#endif /* QDIO_USE_PROCESSING_STATE */ 1133#endif /* QDIO_USE_PROCESSING_STATE */
1134 1134
1135 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); 1135 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
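
The qdio_get_micros() fix matches the s390 TOD clock format, in which bit 51 of the 64-bit clock value corresponds to one microsecond, so shifting right by 12 (not 10) yields microseconds, as the existing comment already said. A quick userspace illustration with a made-up clock value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Bit 51 of the TOD clock is one microsecond, so 0x1000 TOD units
         * equal exactly one microsecond and value >> 12 gives microseconds. */
        uint64_t tod = 5 * 0x1000ULL;       /* made-up value: 5 microseconds */

        printf("%llu microseconds\n", (unsigned long long)(tod >> 12));
        return 0;
    }
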
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ceb3ab31ee08..124569362f02 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -191,49 +191,49 @@ enum qdio_irq_states {
191#if QDIO_VERBOSE_LEVEL>8 191#if QDIO_VERBOSE_LEVEL>8
192#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) 192#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
193#else 193#else
194#define QDIO_PRINT_STUPID(x...) 194#define QDIO_PRINT_STUPID(x...) do { } while (0)
195#endif 195#endif
196 196
197#if QDIO_VERBOSE_LEVEL>7 197#if QDIO_VERBOSE_LEVEL>7
198#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) 198#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
199#else 199#else
200#define QDIO_PRINT_ALL(x...) 200#define QDIO_PRINT_ALL(x...) do { } while (0)
201#endif 201#endif
202 202
203#if QDIO_VERBOSE_LEVEL>6 203#if QDIO_VERBOSE_LEVEL>6
204#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) 204#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
205#else 205#else
206#define QDIO_PRINT_INFO(x...) 206#define QDIO_PRINT_INFO(x...) do { } while (0)
207#endif 207#endif
208 208
209#if QDIO_VERBOSE_LEVEL>5 209#if QDIO_VERBOSE_LEVEL>5
210#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) 210#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
211#else 211#else
212#define QDIO_PRINT_WARN(x...) 212#define QDIO_PRINT_WARN(x...) do { } while (0)
213#endif 213#endif
214 214
215#if QDIO_VERBOSE_LEVEL>4 215#if QDIO_VERBOSE_LEVEL>4
216#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) 216#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
217#else 217#else
218#define QDIO_PRINT_ERR(x...) 218#define QDIO_PRINT_ERR(x...) do { } while (0)
219#endif 219#endif
220 220
221#if QDIO_VERBOSE_LEVEL>3 221#if QDIO_VERBOSE_LEVEL>3
222#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) 222#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
223#else 223#else
224#define QDIO_PRINT_CRIT(x...) 224#define QDIO_PRINT_CRIT(x...) do { } while (0)
225#endif 225#endif
226 226
227#if QDIO_VERBOSE_LEVEL>2 227#if QDIO_VERBOSE_LEVEL>2
228#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) 228#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
229#else 229#else
230#define QDIO_PRINT_ALERT(x...) 230#define QDIO_PRINT_ALERT(x...) do { } while (0)
231#endif 231#endif
232 232
233#if QDIO_VERBOSE_LEVEL>1 233#if QDIO_VERBOSE_LEVEL>1
234#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) 234#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
235#else 235#else
236#define QDIO_PRINT_EMERG(x...) 236#define QDIO_PRINT_EMERG(x...) do { } while (0)
237#endif 237#endif
238 238
239#define HEXDUMP16(importance,header,ptr) \ 239#define HEXDUMP16(importance,header,ptr) \
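
The qdio.h hunk replaces the empty debug-macro expansions with do { } while (0), so that each macro always expands to exactly one statement regardless of verbosity level; the truly empty form leaves only a stray ';' behind, which compilers flag with warnings such as -Wempty-body and which no longer mirrors the single printk of the verbose build. A minimal demonstration (the macro name is illustrative, not one of the QDIO macros):

    #include <stdio.h>

    #ifdef VERBOSE
    #define DBG(x...)   printf(x)            /* verbose build: one statement */
    #else
    /* Quiet build: still one full statement, so if/else nesting and
     * empty-body warnings behave the same as in the verbose build. */
    #define DBG(x...)   do { } while (0)
    #endif

    int main(void)
    {
        int err = 1;

        if (err)
            DBG("something failed: %d\n", err);   /* one statement either way */
        else
            puts("ok");
        return 0;
    }
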
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 15edebbead7f..f0a12d2eb780 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,5 +2,16 @@
2# S/390 crypto devices 2# S/390 crypto devices
3# 3#
4 4
5z90crypt-objs := z90main.o z90hardware.o 5ifdef CONFIG_ZCRYPT_MONOLITHIC
6obj-$(CONFIG_Z90CRYPT) += z90crypt.o 6
7z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
8 zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
9obj-$(CONFIG_ZCRYPT) += z90crypt.o
10
11else
12
13ap-objs := ap_bus.o
14obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
15obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
16
17endif
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 000000000000..6ed0985c0c91
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1221 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.c
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/interrupt.h>
31#include <linux/workqueue.h>
32#include <linux/notifier.h>
33#include <linux/kthread.h>
34#include <linux/mutex.h>
35#include <asm/s390_rdev.h>
36
37#include "ap_bus.h"
38
39/* Some prototypes. */
40static void ap_scan_bus(void *);
41static void ap_poll_all(unsigned long);
42static void ap_poll_timeout(unsigned long);
43static int ap_poll_thread_start(void);
44static void ap_poll_thread_stop(void);
45
46/**
47 * Module description.
48 */
49MODULE_AUTHOR("IBM Corporation");
50MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
51 "Copyright 2006 IBM Corporation");
52MODULE_LICENSE("GPL");
53
54/**
 55 * Module parameters
56 */
57int ap_domain_index = -1; /* Adjunct Processor Domain Index */
58module_param_named(domain, ap_domain_index, int, 0000);
59MODULE_PARM_DESC(domain, "domain index for ap devices");
60EXPORT_SYMBOL(ap_domain_index);
61
62static int ap_thread_flag = 1;
63module_param_named(poll_thread, ap_thread_flag, int, 0000);
64MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");
65
66static struct device *ap_root_device = NULL;
67
68/**
69 * Workqueue & timer for bus rescan.
70 */
71static struct workqueue_struct *ap_work_queue;
72static struct timer_list ap_config_timer;
73static int ap_config_time = AP_CONFIG_TIME;
74static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
75
76/**
77 * Tasklet & timer for AP request polling.
78 */
79static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
80static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
81static atomic_t ap_poll_requests = ATOMIC_INIT(0);
82static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
83static struct task_struct *ap_poll_kthread = NULL;
84static DEFINE_MUTEX(ap_poll_thread_mutex);
85
86/**
87 * Test if ap instructions are available.
88 *
89 * Returns 0 if the ap instructions are installed.
90 */
91static inline int ap_instructions_available(void)
92{
93 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
94 register unsigned long reg1 asm ("1") = -ENODEV;
95 register unsigned long reg2 asm ("2") = 0UL;
96
97 asm volatile(
98 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
99 "0: la %1,0\n"
100 "1:\n"
101 EX_TABLE(0b, 1b)
102 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
103 return reg1;
104}
105
106/**
107 * Test adjunct processor queue.
108 * @qid: the ap queue number
109 * @queue_depth: pointer to queue depth value
110 * @device_type: pointer to device type value
111 *
112 * Returns ap queue status structure.
113 */
114static inline struct ap_queue_status
115ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
116{
117 register unsigned long reg0 asm ("0") = qid;
118 register struct ap_queue_status reg1 asm ("1");
119 register unsigned long reg2 asm ("2") = 0UL;
120
121 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
122 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
123 *device_type = (int) (reg2 >> 24);
124 *queue_depth = (int) (reg2 & 0xff);
125 return reg1;
126}
127
128/**
129 * Reset adjunct processor queue.
130 * @qid: the ap queue number
131 *
132 * Returns ap queue status structure.
133 */
134static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
135{
136 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
137 register struct ap_queue_status reg1 asm ("1");
138 register unsigned long reg2 asm ("2") = 0UL;
139
140 asm volatile(
141 ".long 0xb2af0000" /* PQAP(RAPQ) */
142 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
143 return reg1;
144}
145
146/**
147 * Send message to adjunct processor queue.
148 * @qid: the ap queue number
149 * @psmid: the program supplied message identifier
150 * @msg: the message text
151 * @length: the message length
152 *
153 * Returns ap queue status structure.
154 *
155 * Condition code 1 on NQAP can't happen because the L bit is 1.
156 *
157 * Condition code 2 on NQAP also means the send is incomplete,
158 * because a segment boundary was reached. The NQAP is repeated.
159 */
160static inline struct ap_queue_status
161__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
162{
163 typedef struct { char _[length]; } msgblock;
164 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
165 register struct ap_queue_status reg1 asm ("1");
166 register unsigned long reg2 asm ("2") = (unsigned long) msg;
167 register unsigned long reg3 asm ("3") = (unsigned long) length;
168 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
169 register unsigned long reg5 asm ("5") = (unsigned int) psmid;
170
171 asm volatile (
172 "0: .long 0xb2ad0042\n" /* DQAP */
173 " brc 2,0b"
174 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
175 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
176 : "cc" );
177 return reg1;
178}
179
180int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
181{
182 struct ap_queue_status status;
183
184 status = __ap_send(qid, psmid, msg, length);
185 switch (status.response_code) {
186 case AP_RESPONSE_NORMAL:
187 return 0;
188 case AP_RESPONSE_Q_FULL:
189 return -EBUSY;
190 default: /* Device is gone. */
191 return -ENODEV;
192 }
193}
194EXPORT_SYMBOL(ap_send);
195
196/*
197 * Receive message from adjunct processor queue.
198 * @qid: the ap queue number
199 * @psmid: pointer to program supplied message identifier
200 * @msg: the message text
201 * @length: the message length
202 *
203 * Returns ap queue status structure.
204 *
205 * Condition code 1 on DQAP means the receive has taken place
206 * but only partially. The response is incomplete, hence the
207 * DQAP is repeated.
208 *
209 * Condition code 2 on DQAP also means the receive is incomplete,
210 * this time because a segment boundary was reached. Again, the
211 * DQAP is repeated.
212 *
213 * Note that gpr2 is used by the DQAP instruction to keep track of
214 * any 'residual' length, in case the instruction gets interrupted.
215 * Hence it gets zeroed before the instruction.
216 */
217static inline struct ap_queue_status
218__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
219{
220 typedef struct { char _[length]; } msgblock;
221 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
222 register struct ap_queue_status reg1 asm ("1");
223 register unsigned long reg2 asm("2") = 0UL;
224 register unsigned long reg4 asm("4") = (unsigned long) msg;
225 register unsigned long reg5 asm("5") = (unsigned long) length;
226 register unsigned long reg6 asm("6") = 0UL;
227 register unsigned long reg7 asm("7") = 0UL;
228
229
230 asm volatile(
231 "0: .long 0xb2ae0064\n"
232 " brc 6,0b\n"
233 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
234 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
235 "=m" (*(msgblock *) msg) : : "cc" );
236 *psmid = (((unsigned long long) reg6) << 32) + reg7;
237 return reg1;
238}
239
240int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
241{
242 struct ap_queue_status status;
243
244 status = __ap_recv(qid, psmid, msg, length);
245 switch (status.response_code) {
246 case AP_RESPONSE_NORMAL:
247 return 0;
248 case AP_RESPONSE_NO_PENDING_REPLY:
249 if (status.queue_empty)
250 return -ENOENT;
251 return -EBUSY;
252 default:
253 return -ENODEV;
254 }
255}
256EXPORT_SYMBOL(ap_recv);
257
258/**
259 * Check if an AP queue is available. The test is repeated up to
260 * AP_MAX_RESET times.
261 * @qid: the ap queue number
262 * @queue_depth: pointer to queue depth value
263 * @device_type: pointer to device type value
264 */
265static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
266{
267 struct ap_queue_status status;
268 int t_depth, t_device_type, rc, i;
269
270 rc = -EBUSY;
271 for (i = 0; i < AP_MAX_RESET; i++) {
272 status = ap_test_queue(qid, &t_depth, &t_device_type);
273 switch (status.response_code) {
274 case AP_RESPONSE_NORMAL:
275 *queue_depth = t_depth + 1;
276 *device_type = t_device_type;
277 rc = 0;
278 break;
279 case AP_RESPONSE_Q_NOT_AVAIL:
280 rc = -ENODEV;
281 break;
282 case AP_RESPONSE_RESET_IN_PROGRESS:
283 break;
284 case AP_RESPONSE_DECONFIGURED:
285 rc = -ENODEV;
286 break;
287 case AP_RESPONSE_CHECKSTOPPED:
288 rc = -ENODEV;
289 break;
290 case AP_RESPONSE_BUSY:
291 break;
292 default:
293 BUG();
294 }
295 if (rc != -EBUSY)
296 break;
297 if (i < AP_MAX_RESET - 1)
298 udelay(5);
299 }
300 return rc;
301}
302
303/**
304 * Reset an AP queue and wait for it to become available again.
305 * @qid: the ap queue number
306 */
307static int ap_init_queue(ap_qid_t qid)
308{
309 struct ap_queue_status status;
310 int rc, dummy, i;
311
312 rc = -ENODEV;
313 status = ap_reset_queue(qid);
314 for (i = 0; i < AP_MAX_RESET; i++) {
315 switch (status.response_code) {
316 case AP_RESPONSE_NORMAL:
317 if (status.queue_empty)
318 rc = 0;
319 break;
320 case AP_RESPONSE_Q_NOT_AVAIL:
321 case AP_RESPONSE_DECONFIGURED:
322 case AP_RESPONSE_CHECKSTOPPED:
323 i = AP_MAX_RESET; /* return with -ENODEV */
324 break;
325 case AP_RESPONSE_RESET_IN_PROGRESS:
326 case AP_RESPONSE_BUSY:
327 default:
328 break;
329 }
330 if (rc != -ENODEV)
331 break;
332 if (i < AP_MAX_RESET - 1) {
333 udelay(5);
334 status = ap_test_queue(qid, &dummy, &dummy);
335 }
336 }
337 return rc;
338}
339
340/**
341 * AP device related attributes.
342 */
343static ssize_t ap_hwtype_show(struct device *dev,
344 struct device_attribute *attr, char *buf)
345{
346 struct ap_device *ap_dev = to_ap_dev(dev);
347 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
348}
349static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
350
351static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
352 char *buf)
353{
354 struct ap_device *ap_dev = to_ap_dev(dev);
355 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
356}
357static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
358
359static ssize_t ap_request_count_show(struct device *dev,
360 struct device_attribute *attr,
361 char *buf)
362{
363 struct ap_device *ap_dev = to_ap_dev(dev);
364 int rc;
365
366 spin_lock_bh(&ap_dev->lock);
367 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
368 spin_unlock_bh(&ap_dev->lock);
369 return rc;
370}
371
372static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
373
374static ssize_t ap_modalias_show(struct device *dev,
375 struct device_attribute *attr, char *buf)
376{
377 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
378}
379
380static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
381
382static struct attribute *ap_dev_attrs[] = {
383 &dev_attr_hwtype.attr,
384 &dev_attr_depth.attr,
385 &dev_attr_request_count.attr,
386 &dev_attr_modalias.attr,
387 NULL
388};
389static struct attribute_group ap_dev_attr_group = {
390 .attrs = ap_dev_attrs
391};
392
393/**
394 * AP bus driver registration/unregistration.
395 */
396static int ap_bus_match(struct device *dev, struct device_driver *drv)
397{
398 struct ap_device *ap_dev = to_ap_dev(dev);
399 struct ap_driver *ap_drv = to_ap_drv(drv);
400 struct ap_device_id *id;
401
402 /**
403 * Compare device type of the device with the list of
404 * supported types of the device_driver.
405 */
406 for (id = ap_drv->ids; id->match_flags; id++) {
407 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
408 (id->dev_type != ap_dev->device_type))
409 continue;
410 return 1;
411 }
412 return 0;
413}
414
415/**
416 * uevent function for AP devices. It sets up a single environment
417 * variable DEV_TYPE which contains the hardware device type.
418 */
419static int ap_uevent (struct device *dev, char **envp, int num_envp,
420 char *buffer, int buffer_size)
421{
422 struct ap_device *ap_dev = to_ap_dev(dev);
423 int length;
424
425 if (!ap_dev)
426 return -ENODEV;
427
428 /* Set up DEV_TYPE environment variable. */
429 envp[0] = buffer;
430 length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X",
431 ap_dev->device_type);
432 if (buffer_size - length <= 0)
433 return -ENOMEM;
434 envp[1] = 0;
435 return 0;
436}
437
438static struct bus_type ap_bus_type = {
439 .name = "ap",
440 .match = &ap_bus_match,
441 .uevent = &ap_uevent,
442};
443
444static int ap_device_probe(struct device *dev)
445{
446 struct ap_device *ap_dev = to_ap_dev(dev);
447 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
448 int rc;
449
450 ap_dev->drv = ap_drv;
451 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
452 if (rc)
453 ap_dev->unregistered = 1;
454 return rc;
455}
456
457/**
458 * Flush all requests from the request/pending queue of an AP device.
459 * @ap_dev: pointer to the AP device.
460 */
461static inline void __ap_flush_queue(struct ap_device *ap_dev)
462{
463 struct ap_message *ap_msg, *next;
464
465 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
466 list_del_init(&ap_msg->list);
467 ap_dev->pendingq_count--;
468 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
469 }
470 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
471 list_del_init(&ap_msg->list);
472 ap_dev->requestq_count--;
473 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
474 }
475}
476
477void ap_flush_queue(struct ap_device *ap_dev)
478{
479 spin_lock_bh(&ap_dev->lock);
480 __ap_flush_queue(ap_dev);
481 spin_unlock_bh(&ap_dev->lock);
482}
483EXPORT_SYMBOL(ap_flush_queue);
484
485static int ap_device_remove(struct device *dev)
486{
487 struct ap_device *ap_dev = to_ap_dev(dev);
488 struct ap_driver *ap_drv = ap_dev->drv;
489
490 spin_lock_bh(&ap_dev->lock);
491 __ap_flush_queue(ap_dev);
492 /**
493 * set ->unregistered to 1 while holding the lock. This prevents
494 * new messages to be put on the queue from now on.
495 */
496 ap_dev->unregistered = 1;
497 spin_unlock_bh(&ap_dev->lock);
498 if (ap_drv->remove)
499 ap_drv->remove(ap_dev);
500 return 0;
501}
502
503int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
504 char *name)
505{
506 struct device_driver *drv = &ap_drv->driver;
507
508 drv->bus = &ap_bus_type;
509 drv->probe = ap_device_probe;
510 drv->remove = ap_device_remove;
511 drv->owner = owner;
512 drv->name = name;
513 return driver_register(drv);
514}
515EXPORT_SYMBOL(ap_driver_register);
516
517void ap_driver_unregister(struct ap_driver *ap_drv)
518{
519 driver_unregister(&ap_drv->driver);
520}
521EXPORT_SYMBOL(ap_driver_unregister);
522
523/**
524 * AP bus attributes.
525 */
526static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
527{
528 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
529}
530
531static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
532
533static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
534{
535 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
536}
537
538static ssize_t ap_config_time_store(struct bus_type *bus,
539 const char *buf, size_t count)
540{
541 int time;
542
543 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
544 return -EINVAL;
545 ap_config_time = time;
546 if (!timer_pending(&ap_config_timer) ||
547 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
548 ap_config_timer.expires = jiffies + ap_config_time * HZ;
549 add_timer(&ap_config_timer);
550 }
551 return count;
552}
553
554static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
555
556static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
557{
558 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
559}
560
561static ssize_t ap_poll_thread_store(struct bus_type *bus,
562 const char *buf, size_t count)
563{
564 int flag, rc;
565
566 if (sscanf(buf, "%d\n", &flag) != 1)
567 return -EINVAL;
568 if (flag) {
569 rc = ap_poll_thread_start();
570 if (rc)
571 return rc;
572 }
573 else
574 ap_poll_thread_stop();
575 return count;
576}
577
578static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
579
580static struct bus_attribute *const ap_bus_attrs[] = {
581 &bus_attr_ap_domain,
582 &bus_attr_config_time,
583 &bus_attr_poll_thread,
584 NULL
585};
586
587/**
588 * Pick one of the 16 ap domains.
589 */
590static inline int ap_select_domain(void)
591{
592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j;
594
595 /**
596 * We want to use a single domain. Either the one specified with
597 * the "domain=" parameter or the domain with the maximum number
598 * of devices.
599 */
600 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
601 /* Domain has already been selected. */
602 return 0;
603 best_domain = -1;
604 max_count = 0;
605 for (i = 0; i < AP_DOMAINS; i++) {
606 count = 0;
607 for (j = 0; j < AP_DEVICES; j++) {
608 ap_qid_t qid = AP_MKQID(j, i);
609 rc = ap_query_queue(qid, &queue_depth, &device_type);
610 if (rc)
611 continue;
612 count++;
613 }
614 if (count > max_count) {
615 max_count = count;
616 best_domain = i;
617 }
618 }
619 if (best_domain >= 0){
620 ap_domain_index = best_domain;
621 return 0;
622 }
623 return -ENODEV;
624}
625
626/**
627 * Find the device type if query queue returned a device type of 0.
628 * @ap_dev: pointer to the AP device.
629 */
630static int ap_probe_device_type(struct ap_device *ap_dev)
631{
632 static unsigned char msg[] = {
633 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
634 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
635 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
636 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
637 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
638 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
639 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
640 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
641 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
642 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
643 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
644 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
645 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
646 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
647 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
648 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
649 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
650 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
651 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
652 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
653 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
654 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
655 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
656 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
657 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
658 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
659 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
660 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
661 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
662 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
663 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
664 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
665 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
666 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
667 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
668 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
669 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
670 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
671 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
672 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
673 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
674 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
675 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
676 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
677 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
678 };
679 struct ap_queue_status status;
680 unsigned long long psmid;
681 char *reply;
682 int rc, i;
683
684 reply = (void *) get_zeroed_page(GFP_KERNEL);
685 if (!reply) {
686 rc = -ENOMEM;
687 goto out;
688 }
689
690 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
691 msg, sizeof(msg));
692 if (status.response_code != AP_RESPONSE_NORMAL) {
693 rc = -ENODEV;
694 goto out_free;
695 }
696
697 /* Wait for the test message to complete. */
698 for (i = 0; i < 6; i++) {
699 mdelay(300);
700 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
701 if (status.response_code == AP_RESPONSE_NORMAL &&
702 psmid == 0x0102030405060708ULL)
703 break;
704 }
705 if (i < 6) {
706 /* Got an answer. */
707 if (reply[0] == 0x00 && reply[1] == 0x86)
708 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
709 else
710 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
711 rc = 0;
712 } else
713 rc = -ENODEV;
714
715out_free:
716 free_page((unsigned long) reply);
717out:
718 return rc;
719}
720
721/**
722 * Scan the ap bus for new devices.
723 */
724static int __ap_scan_bus(struct device *dev, void *data)
725{
726 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
727}
728
729static void ap_device_release(struct device *dev)
730{
731 struct ap_device *ap_dev = to_ap_dev(dev);
732
733 kfree(ap_dev);
734}
735
736static void ap_scan_bus(void *data)
737{
738 struct ap_device *ap_dev;
739 struct device *dev;
740 ap_qid_t qid;
741 int queue_depth, device_type;
742 int rc, i;
743
744 if (ap_select_domain() != 0)
745 return;
746 for (i = 0; i < AP_DEVICES; i++) {
747 qid = AP_MKQID(i, ap_domain_index);
748 dev = bus_find_device(&ap_bus_type, NULL,
749 (void *)(unsigned long)qid,
750 __ap_scan_bus);
751 if (dev) {
752 put_device(dev);
753 continue;
754 }
755 rc = ap_query_queue(qid, &queue_depth, &device_type);
756 if (rc)
757 continue;
758 rc = ap_init_queue(qid);
759 if (rc)
760 continue;
761 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
762 if (!ap_dev)
763 break;
764 ap_dev->qid = qid;
765 ap_dev->queue_depth = queue_depth;
766 spin_lock_init(&ap_dev->lock);
767 INIT_LIST_HEAD(&ap_dev->pendingq);
768 INIT_LIST_HEAD(&ap_dev->requestq);
769 if (device_type == 0)
770 ap_probe_device_type(ap_dev);
771 else
772 ap_dev->device_type = device_type;
773
774 ap_dev->device.bus = &ap_bus_type;
775 ap_dev->device.parent = ap_root_device;
776 snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
777 AP_QID_DEVICE(ap_dev->qid));
778 ap_dev->device.release = ap_device_release;
779 rc = device_register(&ap_dev->device);
780 if (rc) {
781 kfree(ap_dev);
782 continue;
783 }
784 /* Add device attributes. */
785 rc = sysfs_create_group(&ap_dev->device.kobj,
786 &ap_dev_attr_group);
787 if (rc)
788 device_unregister(&ap_dev->device);
789 }
790}
791
792static void
793ap_config_timeout(unsigned long ptr)
794{
795 queue_work(ap_work_queue, &ap_config_work);
796 ap_config_timer.expires = jiffies + ap_config_time * HZ;
797 add_timer(&ap_config_timer);
798}
799
800/**
801 * Set up the timer to run the poll tasklet
802 */
803static inline void ap_schedule_poll_timer(void)
804{
805 if (timer_pending(&ap_poll_timer))
806 return;
807 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
808}
809
810/**
811 * Receive pending reply messages from an AP device.
812 * @ap_dev: pointer to the AP device
813 * @flags: pointer to control flags, bit 2^0 is set if another poll is
814 * required, bit 2^1 is set if the poll timer needs to get armed
815 * Returns 0 if the device is still present, -ENODEV if not.
816 */
817static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
818{
819 struct ap_queue_status status;
820 struct ap_message *ap_msg;
821
822 if (ap_dev->queue_count <= 0)
823 return 0;
824 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
825 ap_dev->reply->message, ap_dev->reply->length);
826 switch (status.response_code) {
827 case AP_RESPONSE_NORMAL:
828 atomic_dec(&ap_poll_requests);
829 ap_dev->queue_count--;
830 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
831 if (ap_msg->psmid != ap_dev->reply->psmid)
832 continue;
833 list_del_init(&ap_msg->list);
834 ap_dev->pendingq_count--;
835 ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
836 break;
837 }
838 if (ap_dev->queue_count > 0)
839 *flags |= 1;
840 break;
841 case AP_RESPONSE_NO_PENDING_REPLY:
842 if (status.queue_empty) {
843 /* The card shouldn't forget requests but who knows. */
844 ap_dev->queue_count = 0;
845 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
846 ap_dev->requestq_count += ap_dev->pendingq_count;
847 ap_dev->pendingq_count = 0;
848 } else
849 *flags |= 2;
850 break;
851 default:
852 return -ENODEV;
853 }
854 return 0;
855}
856
857/**
858 * Send messages from the request queue to an AP device.
859 * @ap_dev: pointer to the AP device
860 * @flags: pointer to control flags, bit 2^0 is set if another poll is
861 * required, bit 2^1 is set if the poll timer needs to get armed
862 * Returns 0 if the device is still present, -ENODEV if not.
863 */
864static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
865{
866 struct ap_queue_status status;
867 struct ap_message *ap_msg;
868
869 if (ap_dev->requestq_count <= 0 ||
870 ap_dev->queue_count >= ap_dev->queue_depth)
871 return 0;
872 /* Start the next request on the queue. */
873 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
874 status = __ap_send(ap_dev->qid, ap_msg->psmid,
875 ap_msg->message, ap_msg->length);
876 switch (status.response_code) {
877 case AP_RESPONSE_NORMAL:
878 atomic_inc(&ap_poll_requests);
879 ap_dev->queue_count++;
880 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
881 ap_dev->requestq_count--;
882 ap_dev->pendingq_count++;
883 if (ap_dev->queue_count < ap_dev->queue_depth &&
884 ap_dev->requestq_count > 0)
885 *flags |= 1;
886 *flags |= 2;
887 break;
888 case AP_RESPONSE_Q_FULL:
889 *flags |= 2;
890 break;
891 case AP_RESPONSE_MESSAGE_TOO_BIG:
892 return -EINVAL;
893 default:
894 return -ENODEV;
895 }
896 return 0;
897}
898
899/**
900 * Poll AP device for pending replies and send new messages. If either
901 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
902 * @ap_dev: pointer to the bus device
903 * @flags: pointer to control flags, bit 2^0 is set if another poll is
904 * required, bit 2^1 is set if the poll timer needs to get armed
905 * Returns 0.
906 */
907static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
908{
909 int rc;
910
911 rc = ap_poll_read(ap_dev, flags);
912 if (rc)
913 return rc;
914 return ap_poll_write(ap_dev, flags);
915}
916
917/**
918 * Queue a message to a device.
919 * @ap_dev: pointer to the AP device
920 * @ap_msg: the message to be queued
921 */
922static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
923{
924 struct ap_queue_status status;
925
926 if (list_empty(&ap_dev->requestq) &&
927 ap_dev->queue_count < ap_dev->queue_depth) {
928 status = __ap_send(ap_dev->qid, ap_msg->psmid,
929 ap_msg->message, ap_msg->length);
930 switch (status.response_code) {
931 case AP_RESPONSE_NORMAL:
932 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
933 atomic_inc(&ap_poll_requests);
934 ap_dev->pendingq_count++;
935 ap_dev->queue_count++;
936 ap_dev->total_request_count++;
937 break;
938 case AP_RESPONSE_Q_FULL:
939 list_add_tail(&ap_msg->list, &ap_dev->requestq);
940 ap_dev->requestq_count++;
941 ap_dev->total_request_count++;
942 return -EBUSY;
943 case AP_RESPONSE_MESSAGE_TOO_BIG:
944 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
945 return -EINVAL;
946 default: /* Device is gone. */
947 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
948 return -ENODEV;
949 }
950 } else {
951 list_add_tail(&ap_msg->list, &ap_dev->requestq);
952 ap_dev->requestq_count++;
953 ap_dev->total_request_count++;
954 return -EBUSY;
955 }
956 ap_schedule_poll_timer();
957 return 0;
958}
959
960void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
961{
962 unsigned long flags;
963 int rc;
964
965 spin_lock_bh(&ap_dev->lock);
966 if (!ap_dev->unregistered) {
967 /* Make room on the queue by polling for finished requests. */
968 rc = ap_poll_queue(ap_dev, &flags);
969 if (!rc)
970 rc = __ap_queue_message(ap_dev, ap_msg);
971 if (!rc)
972 wake_up(&ap_poll_wait);
973 } else {
974 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
975 rc = 0;
976 }
977 spin_unlock_bh(&ap_dev->lock);
978 if (rc == -ENODEV)
979 device_unregister(&ap_dev->device);
980}
981EXPORT_SYMBOL(ap_queue_message);
982
983/**
984 * Cancel a crypto request. This is done by removing the request
985 * from the device's pendingq or requestq queue. Note that the
986 * request stays on the AP queue. When it finishes, the message
987 * reply will be discarded because the psmid can't be found.
988 * @ap_dev: AP device that has the message queued
989 * @ap_msg: the message that is to be removed
990 */
991void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
992{
993 struct ap_message *tmp;
994
995 spin_lock_bh(&ap_dev->lock);
996 if (!list_empty(&ap_msg->list)) {
997 list_for_each_entry(tmp, &ap_dev->pendingq, list)
998 if (tmp->psmid == ap_msg->psmid) {
999 ap_dev->pendingq_count--;
1000 goto found;
1001 }
1002 ap_dev->requestq_count--;
1003 found:
1004 list_del_init(&ap_msg->list);
1005 }
1006 spin_unlock_bh(&ap_dev->lock);
1007}
1008EXPORT_SYMBOL(ap_cancel_message);
1009
1010/**
1011 * AP receive polling for finished AP requests
1012 */
1013static void ap_poll_timeout(unsigned long unused)
1014{
1015 tasklet_schedule(&ap_tasklet);
1016}
1017
1018/**
1019 * Poll all AP devices on the bus in a round robin fashion. Continue
1020 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1021 * of the control flags has been set, arm the poll timer.
1022 */
1023static int __ap_poll_all(struct device *dev, void *data)
1024{
1025 struct ap_device *ap_dev = to_ap_dev(dev);
1026 int rc;
1027
1028 spin_lock(&ap_dev->lock);
1029 if (!ap_dev->unregistered) {
1030 rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data);
1031 } else
1032 rc = 0;
1033 spin_unlock(&ap_dev->lock);
1034 if (rc)
1035 device_unregister(&ap_dev->device);
1036 return 0;
1037}
1038
1039static void ap_poll_all(unsigned long dummy)
1040{
1041 unsigned long flags;
1042
1043 do {
1044 flags = 0;
1045 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1046 } while (flags & 1);
1047 if (flags & 2)
1048 ap_schedule_poll_timer();
1049}
1050
1051/**
1052 * AP bus poll thread. The purpose of this thread is to poll for
1053 * finished requests in a loop if there is a "free" cpu - that is
1054 * a cpu that doesn't have anything better to do. The polling stops
1055 * as soon as there is another task or if all messages have been
1056 * delivered.
1057 */
1058static int ap_poll_thread(void *data)
1059{
1060 DECLARE_WAITQUEUE(wait, current);
1061 unsigned long flags;
1062 int requests;
1063
1064 set_user_nice(current, -20);
1065 while (1) {
1066 if (need_resched()) {
1067 schedule();
1068 continue;
1069 }
1070 add_wait_queue(&ap_poll_wait, &wait);
1071 set_current_state(TASK_INTERRUPTIBLE);
1072 if (kthread_should_stop())
1073 break;
1074 requests = atomic_read(&ap_poll_requests);
1075 if (requests <= 0)
1076 schedule();
1077 set_current_state(TASK_RUNNING);
1078 remove_wait_queue(&ap_poll_wait, &wait);
1079
1080 local_bh_disable();
1081 flags = 0;
1082 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1083 local_bh_enable();
1084 }
1085 set_current_state(TASK_RUNNING);
1086 remove_wait_queue(&ap_poll_wait, &wait);
1087 return 0;
1088}
1089
1090static int ap_poll_thread_start(void)
1091{
1092 int rc;
1093
1094 mutex_lock(&ap_poll_thread_mutex);
1095 if (!ap_poll_kthread) {
1096 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1097 rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1098 if (rc)
1099 ap_poll_kthread = NULL;
1100 }
1101 else
1102 rc = 0;
1103 mutex_unlock(&ap_poll_thread_mutex);
1104 return rc;
1105}
1106
1107static void ap_poll_thread_stop(void)
1108{
1109 mutex_lock(&ap_poll_thread_mutex);
1110 if (ap_poll_kthread) {
1111 kthread_stop(ap_poll_kthread);
1112 ap_poll_kthread = NULL;
1113 }
1114 mutex_unlock(&ap_poll_thread_mutex);
1115}
1116
1117/**
1118 * The module initialization code.
1119 */
1120int __init ap_module_init(void)
1121{
1122 int rc, i;
1123
1124 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1125 printk(KERN_WARNING "Invalid param: domain = %d. "
1126 " Not loading.\n", ap_domain_index);
1127 return -EINVAL;
1128 }
1129 if (ap_instructions_available() != 0) {
1130 printk(KERN_WARNING "AP instructions not installed.\n");
1131 return -ENODEV;
1132 }
1133
1134 /* Create /sys/bus/ap. */
1135 rc = bus_register(&ap_bus_type);
1136 if (rc)
1137 goto out;
1138 for (i = 0; ap_bus_attrs[i]; i++) {
1139 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1140 if (rc)
1141 goto out_bus;
1142 }
1143
1144 /* Create /sys/devices/ap. */
1145 ap_root_device = s390_root_dev_register("ap");
1146 rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1147 if (rc)
1148 goto out_bus;
1149
1150 ap_work_queue = create_singlethread_workqueue("kapwork");
1151 if (!ap_work_queue) {
1152 rc = -ENOMEM;
1153 goto out_root;
1154 }
1155
1156 if (ap_select_domain() == 0)
1157 ap_scan_bus(NULL);
1158
1159 /* Setup the ap bus rescan timer. */
1160 init_timer(&ap_config_timer);
1161 ap_config_timer.function = ap_config_timeout;
1162 ap_config_timer.data = 0;
1163 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1164 add_timer(&ap_config_timer);
1165
1166 /* Start the low priority AP bus poll thread. */
1167 if (ap_thread_flag) {
1168 rc = ap_poll_thread_start();
1169 if (rc)
1170 goto out_work;
1171 }
1172
1173 return 0;
1174
1175out_work:
1176 del_timer_sync(&ap_config_timer);
1177 del_timer_sync(&ap_poll_timer);
1178 destroy_workqueue(ap_work_queue);
1179out_root:
1180 s390_root_dev_unregister(ap_root_device);
1181out_bus:
1182 while (i--)
1183 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1184 bus_unregister(&ap_bus_type);
1185out:
1186 return rc;
1187}
1188
1189static int __ap_match_all(struct device *dev, void *data)
1190{
1191 return 1;
1192}
1193
1194/**
1195 * The module termination code
1196 */
1197void ap_module_exit(void)
1198{
1199 int i;
1200 struct device *dev;
1201
1202 ap_poll_thread_stop();
1203 del_timer_sync(&ap_config_timer);
1204 del_timer_sync(&ap_poll_timer);
1205 destroy_workqueue(ap_work_queue);
1206 s390_root_dev_unregister(ap_root_device);
1207 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1208 __ap_match_all)))
1209 {
1210 device_unregister(dev);
1211 put_device(dev);
1212 }
1213 for (i = 0; ap_bus_attrs[i]; i++)
1214 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1215 bus_unregister(&ap_bus_type);
1216}
1217
1218#ifndef CONFIG_ZCRYPT_MONOLITHIC
1219module_init(ap_module_init);
1220module_exit(ap_module_exit);
1221#endif
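
ap_poll_read(), ap_poll_write() and ap_poll_all() above share a small flags contract: bit 2^0 asks for another immediate poll pass, bit 2^1 asks for the poll timer to be armed. A stand-alone sketch of that driver loop, with the per-device polling simulated:

    #include <stdio.h>

    #define POLL_AGAIN  1UL   /* bit 2^0: more work may be pending right now */
    #define ARM_TIMER   2UL   /* bit 2^1: re-check later via the poll timer  */

    /* Simulated per-device poll: pretend two rounds of work remain. */
    static void poll_one_device(unsigned long *flags)
    {
        static int pending = 2;

        if (pending > 0) {
            pending--;
            *flags |= POLL_AGAIN;   /* device still busy, poll again now */
        } else {
            *flags |= ARM_TIMER;    /* nothing outstanding, check later  */
        }
    }

    int main(void)
    {
        unsigned long flags;

        do {
            flags = 0;
            poll_one_device(&flags);       /* ap_poll_all() walks every device */
        } while (flags & POLL_AGAIN);

        if (flags & ARM_TIMER)
            printf("arming poll timer\n"); /* ap_schedule_poll_timer() analogue */
        return 0;
    }
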
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 000000000000..83b69c01cd6e
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,158 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.h
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus header file.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#ifndef _AP_BUS_H_
27#define _AP_BUS_H_
28
29#include <linux/device.h>
30#include <linux/mod_devicetable.h>
31#include <linux/types.h>
32
33#define AP_DEVICES 64 /* Number of AP devices. */
34#define AP_DOMAINS 16 /* Number of AP domains. */
35#define AP_MAX_RESET 90 /* Maximum number of resets. */
36#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
37#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
38
39extern int ap_domain_index;
40
41/**
42 * The ap_qid_t identifier of an ap queue. It contains a
43 * 6 bit device index and a 4 bit queue index (domain).
44 */
45typedef unsigned int ap_qid_t;
46
47#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
48#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
49#define AP_QID_QUEUE(_qid) ((_qid) & 15)
50
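(Editor's note, illustrative only, not part of the patch.) A minimal sketch of how the macros above compose and split a queue id; the device index 5 and domain 3 are arbitrary example values:

	/* Illustrative only: device index 5, domain (queue index) 3. */
	ap_qid_t qid = AP_MKQID(5, 3);		/* (5 & 63) << 8 | (3 & 15) == 0x0503 */
	int device = AP_QID_DEVICE(qid);	/* 5 */
	int domain = AP_QID_QUEUE(qid);		/* 3 */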
51/**
52 * The ap queue status word is returned by all three AP functions
53 * (PQAP, NQAP and DQAP). There's a set of flags in the first
54 * byte, followed by a 1 byte response code.
55 */
56struct ap_queue_status {
57 unsigned int queue_empty : 1;
58 unsigned int replies_waiting : 1;
59 unsigned int queue_full : 1;
60 unsigned int pad1 : 5;
61 unsigned int response_code : 8;
62 unsigned int pad2 : 16;
63};
64
65#define AP_RESPONSE_NORMAL 0x00
66#define AP_RESPONSE_Q_NOT_AVAIL 0x01
67#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
68#define AP_RESPONSE_DECONFIGURED 0x03
69#define AP_RESPONSE_CHECKSTOPPED 0x04
70#define AP_RESPONSE_BUSY 0x05
71#define AP_RESPONSE_Q_FULL 0x10
72#define AP_RESPONSE_NO_PENDING_REPLY 0x10
73#define AP_RESPONSE_INDEX_TOO_BIG 0x11
74#define AP_RESPONSE_NO_FIRST_PART 0x13
75#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
76
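(Editor's note, illustrative only, not part of the patch.) A hedged sketch of how a caller might collapse the status word and response codes above into a coarse error value; the helper name is hypothetical and <linux/errno.h> is assumed:

	/* Hypothetical helper: map an AP status word to an errno-style result. */
	static int example_status_to_errno(struct ap_queue_status status)
	{
		if (status.queue_full)
			return -EBUSY;
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return 0;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			return -ENODEV;
		default:
			return -EIO;
		}
	}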
77/**
78 * Known device types
79 */
80#define AP_DEVICE_TYPE_PCICC 3
81#define AP_DEVICE_TYPE_PCICA 4
82#define AP_DEVICE_TYPE_PCIXCC 5
83#define AP_DEVICE_TYPE_CEX2A 6
84#define AP_DEVICE_TYPE_CEX2C 7
85
86struct ap_device;
87struct ap_message;
88
89struct ap_driver {
90 struct device_driver driver;
91 struct ap_device_id *ids;
92
93 int (*probe)(struct ap_device *);
94 void (*remove)(struct ap_device *);
95 /* receive is called from tasklet context */
96 void (*receive)(struct ap_device *, struct ap_message *,
97 struct ap_message *);
98};
99
100#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
101
102int ap_driver_register(struct ap_driver *, struct module *, char *);
103void ap_driver_unregister(struct ap_driver *);
104
105struct ap_device {
106 struct device device;
107 struct ap_driver *drv; /* Pointer to AP device driver. */
108 spinlock_t lock; /* Per device lock. */
109
110 ap_qid_t qid; /* AP queue id. */
111 int queue_depth; /* AP queue depth.*/
112 int device_type; /* AP device type. */
113 int unregistered; /* marks AP device as unregistered */
114
115 int queue_count; /* # messages currently on AP queue. */
116
 117	struct list_head pendingq;	/* List of messages sent to the AP queue. */
118 int pendingq_count; /* # requests on pendingq list. */
 119	struct list_head requestq;	/* List of messages yet to be sent. */
120 int requestq_count; /* # requests on requestq list. */
121 int total_request_count; /* # requests ever for this AP device. */
122
123 struct ap_message *reply; /* Per device reply message. */
124
125 void *private; /* ap driver private pointer. */
126};
127
128#define to_ap_dev(x) container_of((x), struct ap_device, device)
129
130struct ap_message {
131 struct list_head list; /* Request queueing. */
132 unsigned long long psmid; /* Message id. */
133 void *message; /* Pointer to message buffer. */
134 size_t length; /* Message length. */
135
136 void *private; /* ap driver private pointer. */
137};
138
139#define AP_DEVICE(dt) \
140 .dev_type=(dt), \
141 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
142
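(Editor's note, illustrative only, not part of the patch.) A sketch of how a client driver might use AP_DEVICE() to build its match table; the table name is hypothetical and struct ap_device_id is assumed to come from <linux/mod_devicetable.h>:

	/* Hypothetical id table for a driver that handles CEX2A adapters. */
	static struct ap_device_id example_ap_ids[] = {
		{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
		{ /* end of list */ },
	};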
143/**
144 * Note: don't use ap_send/ap_recv after using ap_queue_message
145 * for the first time. Otherwise the ap message queue will get
146 * confused.
147 */
148int ap_send(ap_qid_t, unsigned long long, void *, size_t);
149int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
150
151void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
152void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
153void ap_flush_queue(struct ap_device *ap_dev);
154
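(Editor's note, illustrative only, not part of the patch.) A rough sketch of the asynchronous path the note above warns about: the driver fills an ap_message and hands it to ap_queue_message(); the reply is delivered later through the driver's receive() callback. Everything prefixed example_ is hypothetical:

	/* Hypothetical send path built only on the declarations above. */
	static void example_send(struct ap_device *ap_dev, struct ap_message *msg,
				 void *buf, size_t len)
	{
		msg->message = buf;			/* request buffer, caller-owned */
		msg->length = len;
		msg->psmid = 0x0102030405060708ULL;	/* arbitrary example message id */
		ap_queue_message(ap_dev, msg);		/* reply arrives via drv->receive() */
	}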
155int ap_module_init(void);
156void ap_module_exit(void);
157
158#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
deleted file mode 100644
index dbbcda3c846a..000000000000
--- a/drivers/s390/crypto/z90common.h
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90common.h
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90COMMON_H_
28#define _Z90COMMON_H_
29
30
31#define RESPBUFFSIZE 256
32#define PCI_FUNC_KEY_DECRYPT 0x5044
33#define PCI_FUNC_KEY_ENCRYPT 0x504B
34extern int ext_bitlens;
35
36enum devstat {
37 DEV_GONE,
38 DEV_ONLINE,
39 DEV_QUEUE_FULL,
40 DEV_EMPTY,
41 DEV_NO_WORK,
42 DEV_BAD_MESSAGE,
43 DEV_TSQ_EXCEPTION,
44 DEV_RSQ_EXCEPTION,
45 DEV_SEN_EXCEPTION,
46 DEV_REC_EXCEPTION
47};
48
49enum hdstat {
50 HD_NOT_THERE,
51 HD_BUSY,
52 HD_DECONFIGURED,
53 HD_CHECKSTOPPED,
54 HD_ONLINE,
55 HD_TSQ_EXCEPTION
56};
57
58#define Z90C_NO_DEVICES 1
59#define Z90C_AMBIGUOUS_DOMAIN 2
60#define Z90C_INCORRECT_DOMAIN 3
61#define ENOTINIT 4
62
63#define SEN_BUSY 7
64#define SEN_USER_ERROR 8
65#define SEN_QUEUE_FULL 11
66#define SEN_NOT_AVAIL 16
67#define SEN_PAD_ERROR 17
68#define SEN_RETRY 18
69#define SEN_RELEASED 24
70
71#define REC_EMPTY 4
72#define REC_BUSY 6
73#define REC_OPERAND_INV 8
74#define REC_OPERAND_SIZE 9
75#define REC_EVEN_MOD 10
76#define REC_NO_WORK 11
77#define REC_HARDWAR_ERR 12
78#define REC_NO_RESPONSE 13
79#define REC_RETRY_DEV 14
80#define REC_USER_GONE 15
81#define REC_BAD_MESSAGE 16
82#define REC_INVALID_PAD 17
83#define REC_USE_PCICA 18
84
85#define WRONG_DEVICE_TYPE 20
86
87#define REC_FATAL_ERROR 32
88#define SEN_FATAL_ERROR 33
89#define TSQ_FATAL_ERROR 34
90#define RSQ_FATAL_ERROR 35
91
92#define Z90CRYPT_NUM_TYPES 6
93#define PCICA 0
94#define PCICC 1
95#define PCIXCC_MCL2 2
96#define PCIXCC_MCL3 3
97#define CEX2C 4
98#define CEX2A 5
99#define NILDEV -1
100#define ANYDEV -1
101#define PCIXCC_UNK -2
102
103enum hdevice_type {
104 PCICC_HW = 3,
105 PCICA_HW = 4,
106 PCIXCC_HW = 5,
107 CEX2A_HW = 6,
108 CEX2C_HW = 7
109};
110
111struct CPRBX {
112 unsigned short cprb_len;
113 unsigned char cprb_ver_id;
114 unsigned char pad_000[3];
115 unsigned char func_id[2];
116 unsigned char cprb_flags[4];
117 unsigned int req_parml;
118 unsigned int req_datal;
119 unsigned int rpl_msgbl;
120 unsigned int rpld_parml;
121 unsigned int rpl_datal;
122 unsigned int rpld_datal;
123 unsigned int req_extbl;
124 unsigned char pad_001[4];
125 unsigned int rpld_extbl;
126 unsigned char req_parmb[16];
127 unsigned char req_datab[16];
128 unsigned char rpl_parmb[16];
129 unsigned char rpl_datab[16];
130 unsigned char req_extb[16];
131 unsigned char rpl_extb[16];
132 unsigned short ccp_rtcode;
133 unsigned short ccp_rscode;
134 unsigned int mac_data_len;
135 unsigned char logon_id[8];
136 unsigned char mac_value[8];
137 unsigned char mac_content_flgs;
138 unsigned char pad_002;
139 unsigned short domain;
140 unsigned char pad_003[12];
141 unsigned char pad_004[36];
142};
143
144#ifndef DEV_NAME
145#define DEV_NAME "z90crypt"
146#endif
147#define PRINTK(fmt, args...) \
148 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
149#define PRINTKN(fmt, args...) \
150 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
151#define PRINTKW(fmt, args...) \
152 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
153#define PRINTKC(fmt, args...) \
154 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
155
156#ifdef Z90CRYPT_DEBUG
157#define PDEBUG(fmt, args...) \
158 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
159#else
160#define PDEBUG(fmt, args...) do {} while (0)
161#endif
162
163#define UMIN(a,b) ((a) < (b) ? (a) : (b))
164#define IS_EVEN(x) ((x) == (2 * ((x) / 2)))
165
166#endif
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
deleted file mode 100644
index 0ca1d126ccb6..000000000000
--- a/drivers/s390/crypto/z90crypt.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90crypt.h
3 *
4 * z90crypt 1.3.3 (kernel-private header)
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _Z90CRYPT_H_
28#define _Z90CRYPT_H_
29
30#include <asm/z90crypt.h>
31
32/**
33 * local errno definitions
34 */
35#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL
36#define EWORKPEND 130 // user issues ioctl while another pending
37#define ERELEASED 131 // user released while ioctl pending
38#define EQUIESCE 132 // z90crypt quiescing (no more work allowed)
39#define ETIMEOUT 133 // request timed out
 40#define EUNKNOWN	  134	// some unrecognized error occurred (retry may succeed)
41#define EGETBUFF 135 // Error getting buffer or hardware lacks capability
42 // (retry in software)
43
44/**
45 * DEPRECATED STRUCTURES
46 */
47
48/**
49 * This structure is DEPRECATED and the corresponding ioctl() has been
50 * replaced with individual ioctl()s for each piece of data!
51 * This structure will NOT survive past version 1.3.1, so switch to the
52 * new ioctl()s.
53 */
54#define MASK_LENGTH 64 // mask length
55struct ica_z90_status {
56 int totalcount;
57 int leedslitecount; // PCICA
58 int leeds2count; // PCICC
59 // int PCIXCCCount; is not in struct for backward compatibility
60 int requestqWaitCount;
61 int pendingqWaitCount;
62 int totalOpenCount;
63 int cryptoDomain;
64 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
65 // 5=CEX2C
66 unsigned char status[MASK_LENGTH];
67 // qdepth: # work elements waiting for each device
68 unsigned char qdepth[MASK_LENGTH];
69};
70
71#endif /* _Z90CRYPT_H_ */
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
deleted file mode 100644
index be60795f4a74..000000000000
--- a/drivers/s390/crypto/z90hardware.c
+++ /dev/null
@@ -1,2531 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90hardware.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h>
28#include <linux/compiler.h>
29#include <linux/delay.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include "z90crypt.h"
33#include "z90common.h"
34
35struct cca_token_hdr {
36 unsigned char token_identifier;
37 unsigned char version;
38 unsigned short token_length;
39 unsigned char reserved[4];
40};
41
42#define CCA_TKN_HDR_ID_EXT 0x1E
43
44struct cca_private_ext_ME_sec {
45 unsigned char section_identifier;
46 unsigned char version;
47 unsigned short section_length;
48 unsigned char private_key_hash[20];
49 unsigned char reserved1[4];
50 unsigned char key_format;
51 unsigned char reserved2;
52 unsigned char key_name_hash[20];
53 unsigned char key_use_flags[4];
54 unsigned char reserved3[6];
55 unsigned char reserved4[24];
56 unsigned char confounder[24];
57 unsigned char exponent[128];
58 unsigned char modulus[128];
59};
60
61#define CCA_PVT_USAGE_ALL 0x80
62
63struct cca_public_sec {
64 unsigned char section_identifier;
65 unsigned char version;
66 unsigned short section_length;
67 unsigned char reserved[2];
68 unsigned short exponent_len;
69 unsigned short modulus_bit_len;
70 unsigned short modulus_byte_len;
71 unsigned char exponent[3];
72};
73
74struct cca_private_ext_ME {
75 struct cca_token_hdr pvtMEHdr;
76 struct cca_private_ext_ME_sec pvtMESec;
77 struct cca_public_sec pubMESec;
78};
79
80struct cca_public_key {
81 struct cca_token_hdr pubHdr;
82 struct cca_public_sec pubSec;
83};
84
85struct cca_pvt_ext_CRT_sec {
86 unsigned char section_identifier;
87 unsigned char version;
88 unsigned short section_length;
89 unsigned char private_key_hash[20];
90 unsigned char reserved1[4];
91 unsigned char key_format;
92 unsigned char reserved2;
93 unsigned char key_name_hash[20];
94 unsigned char key_use_flags[4];
95 unsigned short p_len;
96 unsigned short q_len;
97 unsigned short dp_len;
98 unsigned short dq_len;
99 unsigned short u_len;
100 unsigned short mod_len;
101 unsigned char reserved3[4];
102 unsigned short pad_len;
103 unsigned char reserved4[52];
104 unsigned char confounder[8];
105};
106
107#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
108#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
109
110struct cca_private_ext_CRT {
111 struct cca_token_hdr pvtCrtHdr;
112 struct cca_pvt_ext_CRT_sec pvtCrtSec;
113 struct cca_public_sec pubCrtSec;
114};
115
116struct ap_status_word {
117 unsigned char q_stat_flags;
118 unsigned char response_code;
119 unsigned char reserved[2];
120};
121
122#define AP_Q_STATUS_EMPTY 0x80
123#define AP_Q_STATUS_REPLIES_WAITING 0x40
124#define AP_Q_STATUS_ARRAY_FULL 0x20
125
126#define AP_RESPONSE_NORMAL 0x00
127#define AP_RESPONSE_Q_NOT_AVAIL 0x01
128#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
129#define AP_RESPONSE_DECONFIGURED 0x03
130#define AP_RESPONSE_CHECKSTOPPED 0x04
131#define AP_RESPONSE_BUSY 0x05
132#define AP_RESPONSE_Q_FULL 0x10
133#define AP_RESPONSE_NO_PENDING_REPLY 0x10
134#define AP_RESPONSE_INDEX_TOO_BIG 0x11
135#define AP_RESPONSE_NO_FIRST_PART 0x13
136#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
137
138#define AP_MAX_CDX_BITL 4
139#define AP_RQID_RESERVED_BITL 4
140#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL)
141
142struct type4_hdr {
143 unsigned char reserved1;
144 unsigned char msg_type_code;
145 unsigned short msg_len;
146 unsigned char request_code;
147 unsigned char msg_fmt;
148 unsigned short reserved2;
149};
150
151#define TYPE4_TYPE_CODE 0x04
152#define TYPE4_REQU_CODE 0x40
153
154#define TYPE4_SME_LEN 0x0188
155#define TYPE4_LME_LEN 0x0308
156#define TYPE4_SCR_LEN 0x01E0
157#define TYPE4_LCR_LEN 0x03A0
158
159#define TYPE4_SME_FMT 0x00
160#define TYPE4_LME_FMT 0x10
161#define TYPE4_SCR_FMT 0x40
162#define TYPE4_LCR_FMT 0x50
163
164struct type4_sme {
165 struct type4_hdr header;
166 unsigned char message[128];
167 unsigned char exponent[128];
168 unsigned char modulus[128];
169};
170
171struct type4_lme {
172 struct type4_hdr header;
173 unsigned char message[256];
174 unsigned char exponent[256];
175 unsigned char modulus[256];
176};
177
178struct type4_scr {
179 struct type4_hdr header;
180 unsigned char message[128];
181 unsigned char dp[72];
182 unsigned char dq[64];
183 unsigned char p[72];
184 unsigned char q[64];
185 unsigned char u[72];
186};
187
188struct type4_lcr {
189 struct type4_hdr header;
190 unsigned char message[256];
191 unsigned char dp[136];
192 unsigned char dq[128];
193 unsigned char p[136];
194 unsigned char q[128];
195 unsigned char u[136];
196};
197
198union type4_msg {
199 struct type4_sme sme;
200 struct type4_lme lme;
201 struct type4_scr scr;
202 struct type4_lcr lcr;
203};
204
205struct type84_hdr {
206 unsigned char reserved1;
207 unsigned char code;
208 unsigned short len;
209 unsigned char reserved2[4];
210};
211
212#define TYPE84_RSP_CODE 0x84
213
214struct type6_hdr {
215 unsigned char reserved1;
216 unsigned char type;
217 unsigned char reserved2[2];
218 unsigned char right[4];
219 unsigned char reserved3[2];
220 unsigned char reserved4[2];
221 unsigned char apfs[4];
222 unsigned int offset1;
223 unsigned int offset2;
224 unsigned int offset3;
225 unsigned int offset4;
226 unsigned char agent_id[16];
227 unsigned char rqid[2];
228 unsigned char reserved5[2];
229 unsigned char function_code[2];
230 unsigned char reserved6[2];
231 unsigned int ToCardLen1;
232 unsigned int ToCardLen2;
233 unsigned int ToCardLen3;
234 unsigned int ToCardLen4;
235 unsigned int FromCardLen1;
236 unsigned int FromCardLen2;
237 unsigned int FromCardLen3;
238 unsigned int FromCardLen4;
239};
240
241struct CPRB {
242 unsigned char cprb_len[2];
243 unsigned char cprb_ver_id;
244 unsigned char pad_000;
245 unsigned char srpi_rtcode[4];
246 unsigned char srpi_verb;
247 unsigned char flags;
248 unsigned char func_id[2];
249 unsigned char checkpoint_flag;
250 unsigned char resv2;
251 unsigned char req_parml[2];
252 unsigned char req_parmp[4];
253 unsigned char req_datal[4];
254 unsigned char req_datap[4];
255 unsigned char rpl_parml[2];
256 unsigned char pad_001[2];
257 unsigned char rpl_parmp[4];
258 unsigned char rpl_datal[4];
259 unsigned char rpl_datap[4];
260 unsigned char ccp_rscode[2];
261 unsigned char ccp_rtcode[2];
262 unsigned char repd_parml[2];
263 unsigned char mac_data_len[2];
264 unsigned char repd_datal[4];
265 unsigned char req_pc[2];
266 unsigned char res_origin[8];
267 unsigned char mac_value[8];
268 unsigned char logon_id[8];
269 unsigned char usage_domain[2];
270 unsigned char resv3[18];
271 unsigned char svr_namel[2];
272 unsigned char svr_name[8];
273};
274
275struct type6_msg {
276 struct type6_hdr header;
277 struct CPRB CPRB;
278};
279
280struct type86_hdr {
281 unsigned char reserved1;
282 unsigned char type;
283 unsigned char format;
284 unsigned char reserved2;
285 unsigned char reply_code;
286 unsigned char reserved3[3];
287};
288
289#define TYPE86_RSP_CODE 0x86
290#define TYPE86_FMT2 0x02
291
292struct type86_fmt2_msg {
293 struct type86_hdr header;
294 unsigned char reserved[4];
295 unsigned char apfs[4];
296 unsigned int count1;
297 unsigned int offset1;
298 unsigned int count2;
299 unsigned int offset2;
300 unsigned int count3;
301 unsigned int offset3;
302 unsigned int count4;
303 unsigned int offset4;
304};
305
306static struct type6_hdr static_type6_hdr = {
307 0x00,
308 0x06,
309 {0x00,0x00},
310 {0x00,0x00,0x00,0x00},
311 {0x00,0x00},
312 {0x00,0x00},
313 {0x00,0x00,0x00,0x00},
314 0x00000058,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
319 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
320 {0x00,0x00},
321 {0x00,0x00},
322 {0x50,0x44},
323 {0x00,0x00},
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000
332};
333
334static struct type6_hdr static_type6_hdrX = {
335 0x00,
336 0x06,
337 {0x00,0x00},
338 {0x00,0x00,0x00,0x00},
339 {0x00,0x00},
340 {0x00,0x00},
341 {0x00,0x00,0x00,0x00},
342 0x00000058,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
347 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
348 {0x00,0x00},
349 {0x00,0x00},
350 {0x50,0x44},
351 {0x00,0x00},
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000
360};
361
362static struct CPRB static_cprb = {
363 {0x70,0x00},
364 0x41,
365 0x00,
366 {0x00,0x00,0x00,0x00},
367 0x00,
368 0x00,
369 {0x54,0x32},
370 0x01,
371 0x00,
372 {0x00,0x00},
373 {0x00,0x00,0x00,0x00},
374 {0x00,0x00,0x00,0x00},
375 {0x00,0x00,0x00,0x00},
376 {0x00,0x00},
377 {0x00,0x00},
378 {0x00,0x00,0x00,0x00},
379 {0x00,0x00,0x00,0x00},
380 {0x00,0x00,0x00,0x00},
381 {0x00,0x00},
382 {0x00,0x00},
383 {0x00,0x00},
384 {0x00,0x00},
385 {0x00,0x00,0x00,0x00},
386 {0x00,0x00},
387 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
388 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
389 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
390 {0x00,0x00},
391 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
392 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
393 0x00,0x00},
394 {0x08,0x00},
395 {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20}
396};
397
398struct function_and_rules_block {
399 unsigned char function_code[2];
400 unsigned char ulen[2];
401 unsigned char only_rule[8];
402};
403
404static struct function_and_rules_block static_pkd_function_and_rules = {
405 {0x50,0x44},
406 {0x0A,0x00},
407 {'P','K','C','S','-','1','.','2'}
408};
409
410static struct function_and_rules_block static_pke_function_and_rules = {
411 {0x50,0x4B},
412 {0x0A,0x00},
413 {'P','K','C','S','-','1','.','2'}
414};
415
416struct T6_keyBlock_hdr {
417 unsigned char blen[2];
418 unsigned char ulen[2];
419 unsigned char flags[2];
420};
421
422static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = {
423 {0x89,0x01},
424 {0x87,0x01},
425 {0x00}
426};
427
428static struct CPRBX static_cprbx = {
429 0x00DC,
430 0x02,
431 {0x00,0x00,0x00},
432 {0x54,0x32},
433 {0x00,0x00,0x00,0x00},
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 {0x00,0x00,0x00,0x00},
442 0x00000000,
443 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
444 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
445 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
446 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
447 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
448 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
449 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
450 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
451 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
452 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
453 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
454 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
455 0x0000,
456 0x0000,
457 0x00000000,
458 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
459 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
460 0x00,
461 0x00,
462 0x0000,
463 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
464 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
465 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
466 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
467};
468
469static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = {
470 {0x50,0x44},
471 {0x00,0x0A},
472 {'P','K','C','S','-','1','.','2'}
473};
474
475static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = {
476 {0x50,0x4B},
477 {0x00,0x0A},
478 {'Z','E','R','O','-','P','A','D'}
479};
480
481static struct function_and_rules_block static_pkd_function_and_rulesX = {
482 {0x50,0x44},
483 {0x00,0x0A},
484 {'Z','E','R','O','-','P','A','D'}
485};
486
487static struct function_and_rules_block static_pke_function_and_rulesX = {
488 {0x50,0x4B},
489 {0x00,0x0A},
490 {'M','R','P',' ',' ',' ',' ',' '}
491};
492
493static unsigned char static_PKE_function_code[2] = {0x50, 0x4B};
494
495struct T6_keyBlock_hdrX {
496 unsigned short blen;
497 unsigned short ulen;
498 unsigned char flags[2];
499};
500
501static unsigned char static_pad[256] = {
5020x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
5030x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
5040xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
5050x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
5060x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
5070x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
5080x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
5090xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
5100x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
5110x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
5120x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
5130x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
5140x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
5150x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
5160x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
5170x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
518};
519
520static struct cca_private_ext_ME static_pvt_me_key = {
521 {
522 0x1E,
523 0x00,
524 0x0183,
525 {0x00,0x00,0x00,0x00}
526 },
527
528 {
529 0x02,
530 0x00,
531 0x016C,
532 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
533 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
534 0x00,0x00,0x00,0x00},
535 {0x00,0x00,0x00,0x00},
536 0x00,
537 0x00,
538 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
539 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
540 0x00,0x00,0x00,0x00},
541 {0x80,0x00,0x00,0x00},
542 {0x00,0x00,0x00,0x00,0x00,0x00},
543 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
544 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
545 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
546 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
547 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
548 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
549 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
550 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
551 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
552 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
553 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
554 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
555 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
556 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
557 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
558 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
559 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
560 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
561 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
562 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
563 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
564 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00},
565 {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
566 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
567 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
568 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
569 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
570 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
571 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
572 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
573 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
574 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
575 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
576 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
577 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
578 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
579 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
580 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}
581 },
582
583 {
584 0x04,
585 0x00,
586 0x000F,
587 {0x00,0x00},
588 0x0003,
589 0x0000,
590 0x0000,
591 {0x01,0x00,0x01}
592 }
593};
594
595static struct cca_public_key static_public_key = {
596 {
597 0x1E,
598 0x00,
599 0x0000,
600 {0x00,0x00,0x00,0x00}
601 },
602
603 {
604 0x04,
605 0x00,
606 0x0000,
607 {0x00,0x00},
608 0x0000,
609 0x0000,
610 0x0000,
611 {0x01,0x00,0x01}
612 }
613};
614
615#define FIXED_TYPE6_ME_LEN 0x0000025F
616
617#define FIXED_TYPE6_ME_EN_LEN 0x000000F0
618
619#define FIXED_TYPE6_ME_LENX 0x000002CB
620
621#define FIXED_TYPE6_ME_EN_LENX 0x0000015C
622
623static struct cca_public_sec static_cca_pub_sec = {
624 0x04,
625 0x00,
626 0x000f,
627 {0x00,0x00},
628 0x0003,
629 0x0000,
630 0x0000,
631 {0x01,0x00,0x01}
632};
633
634#define FIXED_TYPE6_CR_LEN 0x00000177
635
636#define FIXED_TYPE6_CR_LENX 0x000001E3
637
638#define MAX_RESPONSE_SIZE 0x00000710
639
640#define MAX_RESPONSEX_SIZE 0x0000077C
641
642#define RESPONSE_CPRB_SIZE 0x000006B8
643#define RESPONSE_CPRBX_SIZE 0x00000724
644
645struct type50_hdr {
646 u8 reserved1;
647 u8 msg_type_code;
648 u16 msg_len;
649 u8 reserved2;
650 u8 ignored;
651 u16 reserved3;
652};
653
654#define TYPE50_TYPE_CODE 0x50
655
656#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
657#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
658#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
659#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
660
661#define TYPE50_MEB1_FMT 0x0001
662#define TYPE50_MEB2_FMT 0x0002
663#define TYPE50_CRB1_FMT 0x0011
664#define TYPE50_CRB2_FMT 0x0012
665
666struct type50_meb1_msg {
667 struct type50_hdr header;
668 u16 keyblock_type;
669 u8 reserved[6];
670 u8 exponent[128];
671 u8 modulus[128];
672 u8 message[128];
673};
674
675struct type50_meb2_msg {
676 struct type50_hdr header;
677 u16 keyblock_type;
678 u8 reserved[6];
679 u8 exponent[256];
680 u8 modulus[256];
681 u8 message[256];
682};
683
684struct type50_crb1_msg {
685 struct type50_hdr header;
686 u16 keyblock_type;
687 u8 reserved[6];
688 u8 p[64];
689 u8 q[64];
690 u8 dp[64];
691 u8 dq[64];
692 u8 u[64];
693 u8 message[128];
694};
695
696struct type50_crb2_msg {
697 struct type50_hdr header;
698 u16 keyblock_type;
699 u8 reserved[6];
700 u8 p[128];
701 u8 q[128];
702 u8 dp[128];
703 u8 dq[128];
704 u8 u[128];
705 u8 message[256];
706};
707
708union type50_msg {
709 struct type50_meb1_msg meb1;
710 struct type50_meb2_msg meb2;
711 struct type50_crb1_msg crb1;
712 struct type50_crb2_msg crb2;
713};
714
715struct type80_hdr {
716 u8 reserved1;
717 u8 type;
718 u16 len;
719 u8 code;
720 u8 reserved2[3];
721 u8 reserved3[8];
722};
723
724#define TYPE80_RSP_CODE 0x80
725
726struct error_hdr {
727 unsigned char reserved1;
728 unsigned char type;
729 unsigned char reserved2[2];
730 unsigned char reply_code;
731 unsigned char reserved3[3];
732};
733
734#define TYPE82_RSP_CODE 0x82
735#define TYPE88_RSP_CODE 0x88
736
737#define REP82_ERROR_MACHINE_FAILURE 0x10
738#define REP82_ERROR_PREEMPT_FAILURE 0x12
739#define REP82_ERROR_CHECKPT_FAILURE 0x14
740#define REP82_ERROR_MESSAGE_TYPE 0x20
741#define REP82_ERROR_INVALID_COMM_CD 0x21
742#define REP82_ERROR_INVALID_MSG_LEN 0x23
743#define REP82_ERROR_RESERVD_FIELD 0x24
744#define REP82_ERROR_FORMAT_FIELD 0x29
745#define REP82_ERROR_INVALID_COMMAND 0x30
746#define REP82_ERROR_MALFORMED_MSG 0x40
747#define REP82_ERROR_RESERVED_FIELDO 0x50
748#define REP82_ERROR_WORD_ALIGNMENT 0x60
749#define REP82_ERROR_MESSAGE_LENGTH 0x80
750#define REP82_ERROR_OPERAND_INVALID 0x82
751#define REP82_ERROR_OPERAND_SIZE 0x84
752#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
753#define REP82_ERROR_RESERVED_FIELD 0x88
754#define REP82_ERROR_TRANSPORT_FAIL 0x90
755#define REP82_ERROR_PACKET_TRUNCATED 0xA0
756#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
757
758#define REP88_ERROR_MODULE_FAILURE 0x10
759#define REP88_ERROR_MODULE_TIMEOUT 0x11
760#define REP88_ERROR_MODULE_NOTINIT 0x13
761#define REP88_ERROR_MODULE_NOTAVAIL 0x14
762#define REP88_ERROR_MODULE_DISABLED 0x15
763#define REP88_ERROR_MODULE_IN_DIAGN 0x17
764#define REP88_ERROR_FASTPATH_DISABLD 0x19
765#define REP88_ERROR_MESSAGE_TYPE 0x20
766#define REP88_ERROR_MESSAGE_MALFORMD 0x22
767#define REP88_ERROR_MESSAGE_LENGTH 0x23
768#define REP88_ERROR_RESERVED_FIELD 0x24
769#define REP88_ERROR_KEY_TYPE 0x34
770#define REP88_ERROR_INVALID_KEY 0x82
771#define REP88_ERROR_OPERAND 0x84
772#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
773
774#define CALLER_HEADER 12
775
776static inline int
777testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
778{
779 int ccode;
780
781 asm volatile
782#ifdef CONFIG_64BIT
783 (" llgfr 0,%4 \n"
784 " slgr 1,1 \n"
785 " lgr 2,1 \n"
786 "0: .long 0xb2af0000 \n"
787 "1: ipm %0 \n"
788 " srl %0,28 \n"
789 " iihh %0,0 \n"
790 " iihl %0,0 \n"
791 " lgr %1,1 \n"
792 " lgr %3,2 \n"
793 " srl %3,24 \n"
794 " sll 2,24 \n"
795 " srl 2,24 \n"
796 " lgr %2,2 \n"
797 "2: \n"
798 ".section .fixup,\"ax\" \n"
799 "3: \n"
800 " lhi %0,%h5 \n"
801 " jg 2b \n"
802 ".previous \n"
803 ".section __ex_table,\"a\" \n"
804 " .align 8 \n"
805 " .quad 0b,3b \n"
806 " .quad 1b,3b \n"
807 ".previous"
808 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
809 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
810 :"cc","0","1","2","memory");
811#else
812 (" lr 0,%4 \n"
813 " slr 1,1 \n"
814 " lr 2,1 \n"
815 "0: .long 0xb2af0000 \n"
816 "1: ipm %0 \n"
817 " srl %0,28 \n"
818 " lr %1,1 \n"
819 " lr %3,2 \n"
820 " srl %3,24 \n"
821 " sll 2,24 \n"
822 " srl 2,24 \n"
823 " lr %2,2 \n"
824 "2: \n"
825 ".section .fixup,\"ax\" \n"
826 "3: \n"
827 " lhi %0,%h5 \n"
828 " bras 1,4f \n"
829 " .long 2b \n"
830 "4: \n"
831 " l 1,0(1) \n"
832 " br 1 \n"
833 ".previous \n"
834 ".section __ex_table,\"a\" \n"
835 " .align 4 \n"
836 " .long 0b,3b \n"
837 " .long 1b,3b \n"
838 ".previous"
839 :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type)
840 :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION)
841 :"cc","0","1","2","memory");
842#endif
843 return ccode;
844}
845
846static inline int
847resetq(int q_nr, struct ap_status_word *stat_p)
848{
849 int ccode;
850
851 asm volatile
852#ifdef CONFIG_64BIT
853 (" llgfr 0,%2 \n"
854 " lghi 1,1 \n"
855 " sll 1,24 \n"
856 " or 0,1 \n"
857 " slgr 1,1 \n"
858 " lgr 2,1 \n"
859 "0: .long 0xb2af0000 \n"
860 "1: ipm %0 \n"
861 " srl %0,28 \n"
862 " iihh %0,0 \n"
863 " iihl %0,0 \n"
864 " lgr %1,1 \n"
865 "2: \n"
866 ".section .fixup,\"ax\" \n"
867 "3: \n"
868 " lhi %0,%h3 \n"
869 " jg 2b \n"
870 ".previous \n"
871 ".section __ex_table,\"a\" \n"
872 " .align 8 \n"
873 " .quad 0b,3b \n"
874 " .quad 1b,3b \n"
875 ".previous"
876 :"=d" (ccode),"=d" (*stat_p)
877 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
878 :"cc","0","1","2","memory");
879#else
880 (" lr 0,%2 \n"
881 " lhi 1,1 \n"
882 " sll 1,24 \n"
883 " or 0,1 \n"
884 " slr 1,1 \n"
885 " lr 2,1 \n"
886 "0: .long 0xb2af0000 \n"
887 "1: ipm %0 \n"
888 " srl %0,28 \n"
889 " lr %1,1 \n"
890 "2: \n"
891 ".section .fixup,\"ax\" \n"
892 "3: \n"
893 " lhi %0,%h3 \n"
894 " bras 1,4f \n"
895 " .long 2b \n"
896 "4: \n"
897 " l 1,0(1) \n"
898 " br 1 \n"
899 ".previous \n"
900 ".section __ex_table,\"a\" \n"
901 " .align 4 \n"
902 " .long 0b,3b \n"
903 " .long 1b,3b \n"
904 ".previous"
905 :"=d" (ccode),"=d" (*stat_p)
906 :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION)
907 :"cc","0","1","2","memory");
908#endif
909 return ccode;
910}
911
912static inline int
913sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
914{
915 int ccode;
916
917 asm volatile
918#ifdef CONFIG_64BIT
919 (" lgr 6,%3 \n"
920 " llgfr 7,%2 \n"
921 " llgt 0,0(6) \n"
922 " lghi 1,64 \n"
923 " sll 1,24 \n"
924 " or 0,1 \n"
925 " la 6,4(6) \n"
926 " llgt 2,0(6) \n"
927 " llgt 3,4(6) \n"
928 " la 6,8(6) \n"
929 " slr 1,1 \n"
930 "0: .long 0xb2ad0026 \n"
931 "1: brc 2,0b \n"
932 " ipm %0 \n"
933 " srl %0,28 \n"
934 " iihh %0,0 \n"
935 " iihl %0,0 \n"
936 " lgr %1,1 \n"
937 "2: \n"
938 ".section .fixup,\"ax\" \n"
939 "3: \n"
940 " lhi %0,%h4 \n"
941 " jg 2b \n"
942 ".previous \n"
943 ".section __ex_table,\"a\" \n"
944 " .align 8 \n"
945 " .quad 0b,3b \n"
946 " .quad 1b,3b \n"
947 ".previous"
948 :"=d" (ccode),"=d" (*stat)
949 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
950 :"cc","0","1","2","3","6","7","memory");
951#else
952 (" lr 6,%3 \n"
953 " lr 7,%2 \n"
954 " l 0,0(6) \n"
955 " lhi 1,64 \n"
956 " sll 1,24 \n"
957 " or 0,1 \n"
958 " la 6,4(6) \n"
959 " l 2,0(6) \n"
960 " l 3,4(6) \n"
961 " la 6,8(6) \n"
962 " slr 1,1 \n"
963 "0: .long 0xb2ad0026 \n"
964 "1: brc 2,0b \n"
965 " ipm %0 \n"
966 " srl %0,28 \n"
967 " lr %1,1 \n"
968 "2: \n"
969 ".section .fixup,\"ax\" \n"
970 "3: \n"
971 " lhi %0,%h4 \n"
972 " bras 1,4f \n"
973 " .long 2b \n"
974 "4: \n"
975 " l 1,0(1) \n"
976 " br 1 \n"
977 ".previous \n"
978 ".section __ex_table,\"a\" \n"
979 " .align 4 \n"
980 " .long 0b,3b \n"
981 " .long 1b,3b \n"
982 ".previous"
983 :"=d" (ccode),"=d" (*stat)
984 :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION)
985 :"cc","0","1","2","3","6","7","memory");
986#endif
987 return ccode;
988}
989
990static inline int
991rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
992 struct ap_status_word *st)
993{
994 int ccode;
995
996 asm volatile
997#ifdef CONFIG_64BIT
998 (" llgfr 0,%2 \n"
999 " lgr 3,%4 \n"
1000 " lgr 6,%3 \n"
1001 " llgfr 7,%5 \n"
1002 " lghi 1,128 \n"
1003 " sll 1,24 \n"
1004 " or 0,1 \n"
1005 " slgr 1,1 \n"
1006 " lgr 2,1 \n"
1007 " lgr 4,1 \n"
1008 " lgr 5,1 \n"
1009 "0: .long 0xb2ae0046 \n"
1010 "1: brc 2,0b \n"
1011 " brc 4,0b \n"
1012 " ipm %0 \n"
1013 " srl %0,28 \n"
1014 " iihh %0,0 \n"
1015 " iihl %0,0 \n"
1016 " lgr %1,1 \n"
1017 " st 4,0(3) \n"
1018 " st 5,4(3) \n"
1019 "2: \n"
1020 ".section .fixup,\"ax\" \n"
1021 "3: \n"
1022 " lhi %0,%h6 \n"
1023 " jg 2b \n"
1024 ".previous \n"
1025 ".section __ex_table,\"a\" \n"
1026 " .align 8 \n"
1027 " .quad 0b,3b \n"
1028 " .quad 1b,3b \n"
1029 ".previous"
1030 :"=d"(ccode),"=d"(*st)
1031 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
1032 :"cc","0","1","2","3","4","5","6","7","memory");
1033#else
1034 (" lr 0,%2 \n"
1035 " lr 3,%4 \n"
1036 " lr 6,%3 \n"
1037 " lr 7,%5 \n"
1038 " lhi 1,128 \n"
1039 " sll 1,24 \n"
1040 " or 0,1 \n"
1041 " slr 1,1 \n"
1042 " lr 2,1 \n"
1043 " lr 4,1 \n"
1044 " lr 5,1 \n"
1045 "0: .long 0xb2ae0046 \n"
1046 "1: brc 2,0b \n"
1047 " brc 4,0b \n"
1048 " ipm %0 \n"
1049 " srl %0,28 \n"
1050 " lr %1,1 \n"
1051 " st 4,0(3) \n"
1052 " st 5,4(3) \n"
1053 "2: \n"
1054 ".section .fixup,\"ax\" \n"
1055 "3: \n"
1056 " lhi %0,%h6 \n"
1057 " bras 1,4f \n"
1058 " .long 2b \n"
1059 "4: \n"
1060 " l 1,0(1) \n"
1061 " br 1 \n"
1062 ".previous \n"
1063 ".section __ex_table,\"a\" \n"
1064 " .align 4 \n"
1065 " .long 0b,3b \n"
1066 " .long 1b,3b \n"
1067 ".previous"
1068 :"=d"(ccode),"=d"(*st)
1069 :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION)
1070 :"cc","0","1","2","3","4","5","6","7","memory");
1071#endif
1072 return ccode;
1073}
1074
1075static inline void
1076itoLe2(int *i_p, unsigned char *lechars)
1077{
1078 *lechars = *((unsigned char *) i_p + sizeof(int) - 1);
1079 *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2);
1080}
1081
1082static inline void
1083le2toI(unsigned char *lechars, int *i_p)
1084{
1085 unsigned char *ic_p;
1086 *i_p = 0;
1087 ic_p = (unsigned char *) i_p;
1088 *(ic_p + 2) = *(lechars + 1);
1089 *(ic_p + 3) = *(lechars);
1090}
1091
1092static inline int
1093is_empty(unsigned char *ptr, int len)
1094{
1095 return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len);
1096}
1097
1098enum hdstat
1099query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1100{
1101 int q_nr, i, t_depth, t_dev_type;
1102 enum devstat ccode;
1103 struct ap_status_word stat_word;
1104 enum hdstat stat;
1105 int break_out;
1106
1107 q_nr = (deviceNr << SKIP_BITL) + cdx;
1108 stat = HD_BUSY;
1109 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1110 PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code);
1111 break_out = 0;
1112 for (i = 0; i < resetNr; i++) {
1113 if (ccode > 3) {
1114 PRINTKC("Exception testing device %d\n", i);
1115 return HD_TSQ_EXCEPTION;
1116 }
1117 switch (ccode) {
1118 case 0:
1119 PDEBUG("t_dev_type %d\n", t_dev_type);
1120 break_out = 1;
1121 stat = HD_ONLINE;
1122 *q_depth = t_depth + 1;
1123 switch (t_dev_type) {
1124 case PCICA_HW:
1125 *dev_type = PCICA;
1126 break;
1127 case PCICC_HW:
1128 *dev_type = PCICC;
1129 break;
1130 case PCIXCC_HW:
1131 *dev_type = PCIXCC_UNK;
1132 break;
1133 case CEX2C_HW:
1134 *dev_type = CEX2C;
1135 break;
1136 case CEX2A_HW:
1137 *dev_type = CEX2A;
1138 break;
1139 default:
1140 *dev_type = NILDEV;
1141 break;
1142 }
1143 PDEBUG("available device %d: Q depth = %d, dev "
1144 "type = %d, stat = %02X%02X%02X%02X\n",
1145 deviceNr, *q_depth, *dev_type,
1146 stat_word.q_stat_flags,
1147 stat_word.response_code,
1148 stat_word.reserved[0],
1149 stat_word.reserved[1]);
1150 break;
1151 case 3:
1152 switch (stat_word.response_code) {
1153 case AP_RESPONSE_NORMAL:
1154 stat = HD_ONLINE;
1155 break_out = 1;
1156 *q_depth = t_depth + 1;
1157 *dev_type = t_dev_type;
1158 PDEBUG("cc3, available device "
1159 "%d: Q depth = %d, dev "
1160 "type = %d, stat = "
1161 "%02X%02X%02X%02X\n",
1162 deviceNr, *q_depth,
1163 *dev_type,
1164 stat_word.q_stat_flags,
1165 stat_word.response_code,
1166 stat_word.reserved[0],
1167 stat_word.reserved[1]);
1168 break;
1169 case AP_RESPONSE_Q_NOT_AVAIL:
1170 stat = HD_NOT_THERE;
1171 break_out = 1;
1172 break;
1173 case AP_RESPONSE_RESET_IN_PROGRESS:
1174 PDEBUG("device %d in reset\n",
1175 deviceNr);
1176 break;
1177 case AP_RESPONSE_DECONFIGURED:
1178 stat = HD_DECONFIGURED;
1179 break_out = 1;
1180 break;
1181 case AP_RESPONSE_CHECKSTOPPED:
1182 stat = HD_CHECKSTOPPED;
1183 break_out = 1;
1184 break;
1185 case AP_RESPONSE_BUSY:
1186 PDEBUG("device %d busy\n",
1187 deviceNr);
1188 break;
1189 default:
1190 break;
1191 }
1192 break;
1193 default:
1194 stat = HD_NOT_THERE;
1195 break_out = 1;
1196 break;
1197 }
1198 if (break_out)
1199 break;
1200
1201 udelay(5);
1202
1203 ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word);
1204 }
1205 return stat;
1206}
1207
1208enum devstat
1209reset_device(int deviceNr, int cdx, int resetNr)
1210{
1211 int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i;
1212 struct ap_status_word stat_word;
1213 enum devstat stat;
1214 int break_out;
1215
1216 q_nr = (deviceNr << SKIP_BITL) + cdx;
1217 stat = DEV_GONE;
1218 ccode = resetq(q_nr, &stat_word);
1219 if (ccode > 3)
1220 return DEV_RSQ_EXCEPTION;
1221
1222 break_out = 0;
1223 for (i = 0; i < resetNr; i++) {
1224 switch (ccode) {
1225 case 0:
1226 stat = DEV_ONLINE;
1227 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1228 break_out = 1;
1229 break;
1230 case 3:
1231 switch (stat_word.response_code) {
1232 case AP_RESPONSE_NORMAL:
1233 stat = DEV_ONLINE;
1234 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1235 break_out = 1;
1236 break;
1237 case AP_RESPONSE_Q_NOT_AVAIL:
1238 case AP_RESPONSE_DECONFIGURED:
1239 case AP_RESPONSE_CHECKSTOPPED:
1240 stat = DEV_GONE;
1241 break_out = 1;
1242 break;
1243 case AP_RESPONSE_RESET_IN_PROGRESS:
1244 case AP_RESPONSE_BUSY:
1245 default:
1246 break;
1247 }
1248 break;
1249 default:
1250 stat = DEV_GONE;
1251 break_out = 1;
1252 break;
1253 }
1254 if (break_out == 1)
1255 break;
1256 udelay(5);
1257
1258 ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word);
1259 if (ccode > 3) {
1260 stat = DEV_TSQ_EXCEPTION;
1261 break;
1262 }
1263 }
1264 PDEBUG("Number of testq's needed for reset: %d\n", i);
1265
1266 if (i >= resetNr) {
1267 stat = DEV_GONE;
1268 }
1269
1270 return stat;
1271}
1272
1273#ifdef DEBUG_HYDRA_MSGS
1274static inline void
1275print_buffer(unsigned char *buffer, int bufflen)
1276{
1277 int i;
1278 for (i = 0; i < bufflen; i += 16) {
1279 PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X "
1280 "%02X%02X%02X%02X %02X%02X%02X%02X\n", i,
1281 buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3],
1282 buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7],
1283 buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11],
1284 buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]);
1285 }
1286}
1287#endif
1288
1289enum devstat
1290send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext)
1291{
1292 struct ap_status_word stat_word;
1293 enum devstat stat;
1294 int ccode;
1295 u32 *q_nr_p = (u32 *)msg_ext;
1296
1297 *q_nr_p = (dev_nr << SKIP_BITL) + cdx;
1298 PDEBUG("msg_len passed to sen: %d\n", msg_len);
1299 PDEBUG("q number passed to sen: %02x%02x%02x%02x\n",
1300 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]);
1301 stat = DEV_GONE;
1302
1303#ifdef DEBUG_HYDRA_MSGS
1304 PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X "
1305 "%02X%02X%02X%02X\n",
1306 msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3],
1307 msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7],
1308 msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]);
1309 print_buffer(msg_ext+CALLER_HEADER, msg_len);
1310#endif
1311
1312 ccode = sen(msg_len, msg_ext, &stat_word);
1313 if (ccode > 3)
1314 return DEV_SEN_EXCEPTION;
1315
1316 PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n",
1317 ccode, stat_word.q_stat_flags, stat_word.response_code,
1318 stat_word.reserved[0], stat_word.reserved[1]);
1319 switch (ccode) {
1320 case 0:
1321 stat = DEV_ONLINE;
1322 break;
1323 case 1:
1324 stat = DEV_GONE;
1325 break;
1326 case 3:
1327 switch (stat_word.response_code) {
1328 case AP_RESPONSE_NORMAL:
1329 stat = DEV_ONLINE;
1330 break;
1331 case AP_RESPONSE_Q_FULL:
1332 stat = DEV_QUEUE_FULL;
1333 break;
1334 default:
1335 stat = DEV_GONE;
1336 break;
1337 }
1338 break;
1339 default:
1340 stat = DEV_GONE;
1341 break;
1342 }
1343
1344 return stat;
1345}
1346
1347enum devstat
1348receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp,
1349 unsigned char *psmid)
1350{
1351 int ccode;
1352 struct ap_status_word stat_word;
1353 enum devstat stat;
1354
1355 memset(resp, 0x00, 8);
1356
1357 ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid,
1358 &stat_word);
1359 if (ccode > 3)
1360 return DEV_REC_EXCEPTION;
1361
1362 PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n",
1363 ccode, stat_word.q_stat_flags, stat_word.response_code,
1364 stat_word.reserved[0], stat_word.reserved[1]);
1365
1366 stat = DEV_GONE;
1367 switch (ccode) {
1368 case 0:
1369 stat = DEV_ONLINE;
1370#ifdef DEBUG_HYDRA_MSGS
1371 print_buffer(resp, resplen);
1372#endif
1373 break;
1374 case 3:
1375 switch (stat_word.response_code) {
1376 case AP_RESPONSE_NORMAL:
1377 stat = DEV_ONLINE;
1378 break;
1379 case AP_RESPONSE_NO_PENDING_REPLY:
1380 if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY)
1381 stat = DEV_EMPTY;
1382 else
1383 stat = DEV_NO_WORK;
1384 break;
1385 case AP_RESPONSE_INDEX_TOO_BIG:
1386 case AP_RESPONSE_NO_FIRST_PART:
1387 case AP_RESPONSE_MESSAGE_TOO_BIG:
1388 stat = DEV_BAD_MESSAGE;
1389 break;
1390 default:
1391 break;
1392 }
1393 break;
1394 default:
1395 break;
1396 }
1397
1398 return stat;
1399}
1400
1401static inline int
1402pad_msg(unsigned char *buffer, int totalLength, int msgLength)
1403{
1404 int pad_len;
1405
1406 for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++)
1407 if (buffer[pad_len] != 0x00)
1408 break;
1409 pad_len -= 3;
1410 if (pad_len < 8)
1411 return SEN_PAD_ERROR;
1412
1413 buffer[0] = 0x00;
1414 buffer[1] = 0x02;
1415
1416 memcpy(buffer+2, static_pad, pad_len);
1417
1418 buffer[pad_len + 2] = 0x00;
1419
1420 return 0;
1421}
1422
1423static inline int
1424is_common_public_key(unsigned char *key, int len)
1425{
1426 int i;
1427
1428 for (i = 0; i < len; i++)
1429 if (key[i])
1430 break;
1431 key += i;
1432 len -= i;
1433 if (((len == 1) && (key[0] == 3)) ||
1434 ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1)))
1435 return 1;
1436
1437 return 0;
1438}
1439
1440static int
1441ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
1442 union type4_msg *z90cMsg_p)
1443{
1444 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
1445 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
1446 union type4_msg *tmp_type4_msg;
1447
1448 mod_len = icaMex_p->inputdatalength;
1449
1450 msg_size = ((mod_len <= 128) ? TYPE4_SME_LEN : TYPE4_LME_LEN) +
1451 CALLER_HEADER;
1452
1453 memset(z90cMsg_p, 0, msg_size);
1454
1455 tmp_type4_msg = (union type4_msg *)
1456 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1457
1458 tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE;
1459 tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE;
1460
1461 if (mod_len <= 128) {
1462 tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT;
1463 tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN;
1464 mod_tgt = tmp_type4_msg->sme.modulus;
1465 mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus);
1466 exp_tgt = tmp_type4_msg->sme.exponent;
1467 exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent);
1468 inp_tgt = tmp_type4_msg->sme.message;
1469 inp_tgt_len = sizeof(tmp_type4_msg->sme.message);
1470 } else {
1471 tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT;
1472 tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN;
1473 mod_tgt = tmp_type4_msg->lme.modulus;
1474 mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus);
1475 exp_tgt = tmp_type4_msg->lme.exponent;
1476 exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent);
1477 inp_tgt = tmp_type4_msg->lme.message;
1478 inp_tgt_len = sizeof(tmp_type4_msg->lme.message);
1479 }
1480
1481 mod_tgt += (mod_tgt_len - mod_len);
1482 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
1483 return SEN_RELEASED;
1484 if (is_empty(mod_tgt, mod_len))
1485 return SEN_USER_ERROR;
1486 exp_tgt += (exp_tgt_len - mod_len);
1487 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
1488 return SEN_RELEASED;
1489 if (is_empty(exp_tgt, mod_len))
1490 return SEN_USER_ERROR;
1491 inp_tgt += (inp_tgt_len - mod_len);
1492 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
1493 return SEN_RELEASED;
1494 if (is_empty(inp_tgt, mod_len))
1495 return SEN_USER_ERROR;
1496
1497 *z90cMsg_l_p = msg_size - CALLER_HEADER;
1498
1499 return 0;
1500}
1501
1502static int
1503ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
1504 int *z90cMsg_l_p, union type4_msg *z90cMsg_p)
1505{
1506 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
1507 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len;
1508 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt;
1509 union type4_msg *tmp_type4_msg;
1510
1511 mod_len = icaMsg_p->inputdatalength;
1512 short_len = mod_len / 2;
1513 long_len = mod_len / 2 + 8;
1514
1515 tmp_size = ((mod_len <= 128) ? TYPE4_SCR_LEN : TYPE4_LCR_LEN) +
1516 CALLER_HEADER;
1517
1518 memset(z90cMsg_p, 0, tmp_size);
1519
1520 tmp_type4_msg = (union type4_msg *)
1521 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
1522
1523 tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE;
1524 tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE;
1525 if (mod_len <= 128) {
1526 tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT;
1527 tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN;
1528 p_tgt = tmp_type4_msg->scr.p;
1529 p_tgt_len = sizeof(tmp_type4_msg->scr.p);
1530 q_tgt = tmp_type4_msg->scr.q;
1531 q_tgt_len = sizeof(tmp_type4_msg->scr.q);
1532 dp_tgt = tmp_type4_msg->scr.dp;
1533 dp_tgt_len = sizeof(tmp_type4_msg->scr.dp);
1534 dq_tgt = tmp_type4_msg->scr.dq;
1535 dq_tgt_len = sizeof(tmp_type4_msg->scr.dq);
1536 u_tgt = tmp_type4_msg->scr.u;
1537 u_tgt_len = sizeof(tmp_type4_msg->scr.u);
1538 inp_tgt = tmp_type4_msg->scr.message;
1539 inp_tgt_len = sizeof(tmp_type4_msg->scr.message);
1540 } else {
1541 tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT;
1542 tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN;
1543 p_tgt = tmp_type4_msg->lcr.p;
1544 p_tgt_len = sizeof(tmp_type4_msg->lcr.p);
1545 q_tgt = tmp_type4_msg->lcr.q;
1546 q_tgt_len = sizeof(tmp_type4_msg->lcr.q);
1547 dp_tgt = tmp_type4_msg->lcr.dp;
1548 dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp);
1549 dq_tgt = tmp_type4_msg->lcr.dq;
1550 dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq);
1551 u_tgt = tmp_type4_msg->lcr.u;
1552 u_tgt_len = sizeof(tmp_type4_msg->lcr.u);
1553 inp_tgt = tmp_type4_msg->lcr.message;
1554 inp_tgt_len = sizeof(tmp_type4_msg->lcr.message);
1555 }
1556
1557 p_tgt += (p_tgt_len - long_len);
1558 if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len))
1559 return SEN_RELEASED;
1560 if (is_empty(p_tgt, long_len))
1561 return SEN_USER_ERROR;
1562 q_tgt += (q_tgt_len - short_len);
1563 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
1564 return SEN_RELEASED;
1565 if (is_empty(q_tgt, short_len))
1566 return SEN_USER_ERROR;
1567 dp_tgt += (dp_tgt_len - long_len);
1568 if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len))
1569 return SEN_RELEASED;
1570 if (is_empty(dp_tgt, long_len))
1571 return SEN_USER_ERROR;
1572 dq_tgt += (dq_tgt_len - short_len);
1573 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
1574 return SEN_RELEASED;
1575 if (is_empty(dq_tgt, short_len))
1576 return SEN_USER_ERROR;
1577 u_tgt += (u_tgt_len - long_len);
1578 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len))
1579 return SEN_RELEASED;
1580 if (is_empty(u_tgt, long_len))
1581 return SEN_USER_ERROR;
1582 inp_tgt += (inp_tgt_len - mod_len);
1583 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
1584 return SEN_RELEASED;
1585 if (is_empty(inp_tgt, mod_len))
1586 return SEN_USER_ERROR;
1587
1588 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1589
1590 return 0;
1591}
1592
1593static int
1594ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1595 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1596{
1597 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1598 unsigned char *temp;
1599 struct type6_hdr *tp6Hdr_p;
1600 struct CPRB *cprb_p;
1601 struct cca_private_ext_ME *key_p;
1602 static int deprecated_msg_count = 0;
1603
1604 mod_len = icaMsg_p->inputdatalength;
1605 tmp_size = FIXED_TYPE6_ME_LEN + mod_len;
1606 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1607 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1608 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1609
1610 memset(z90cMsg_p, 0, tmp_size);
1611
1612 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1613 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1614 tp6Hdr_p = (struct type6_hdr *)temp;
1615 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1616 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1617
1618 temp += sizeof(struct type6_hdr);
1619 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1620 cprb_p = (struct CPRB *) temp;
1621 cprb_p->usage_domain[0]= (unsigned char)cdx;
1622 itoLe2(&parmBlock_l, cprb_p->req_parml);
1623 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1624
1625 temp += sizeof(struct CPRB);
1626 memcpy(temp, &static_pkd_function_and_rules,
1627 sizeof(struct function_and_rules_block));
1628
1629 temp += sizeof(struct function_and_rules_block);
1630 vud_len = 2 + icaMsg_p->inputdatalength;
1631 itoLe2(&vud_len, temp);
1632
1633 temp += 2;
1634 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len))
1635 return SEN_RELEASED;
1636 if (is_empty(temp, mod_len))
1637 return SEN_USER_ERROR;
1638
1639 temp += mod_len;
1640 memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr));
1641
1642 temp += sizeof(struct T6_keyBlock_hdr);
1643 memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME));
1644 key_p = (struct cca_private_ext_ME *)temp;
1645 temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent)
1646 - mod_len;
1647 if (copy_from_user(temp, icaMsg_p->b_key, mod_len))
1648 return SEN_RELEASED;
1649 if (is_empty(temp, mod_len))
1650 return SEN_USER_ERROR;
1651
1652 if (is_common_public_key(temp, mod_len)) {
1653 if (deprecated_msg_count < 20) {
1654 PRINTK("Common public key used for modex decrypt\n");
1655 deprecated_msg_count++;
1656 if (deprecated_msg_count == 20)
1657 PRINTK("No longer issuing messages about common"
1658 " public key for modex decrypt.\n");
1659 }
1660 return SEN_NOT_AVAIL;
1661 }
1662
1663 temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus)
1664 - mod_len;
1665 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1666 return SEN_RELEASED;
1667 if (is_empty(temp, mod_len))
1668 return SEN_USER_ERROR;
1669
1670 key_p->pubMESec.modulus_bit_len = 8 * mod_len;
1671
1672 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1673
1674 return 0;
1675}
1676
1677static int
1678ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1679 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1680{
1681 int mod_len, vud_len, exp_len, key_len;
1682 int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i;
1683 unsigned char *temp_exp, *exp_p, *temp;
1684 struct type6_hdr *tp6Hdr_p;
1685 struct CPRB *cprb_p;
1686 struct cca_public_key *key_p;
1687 struct T6_keyBlock_hdr *keyb_p;
1688
1689 temp_exp = kmalloc(256, GFP_KERNEL);
1690 if (!temp_exp)
1691 return EGETBUFF;
1692 mod_len = icaMsg_p->inputdatalength;
1693 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1694 kfree(temp_exp);
1695 return SEN_RELEASED;
1696 }
1697 if (is_empty(temp_exp, mod_len)) {
1698 kfree(temp_exp);
1699 return SEN_USER_ERROR;
1700 }
1701
1702 exp_p = temp_exp;
1703 for (i = 0; i < mod_len; i++)
1704 if (exp_p[i])
1705 break;
1706 if (i >= mod_len) {
1707 kfree(temp_exp);
1708 return SEN_USER_ERROR;
1709 }
1710
1711 exp_len = mod_len - i;
1712 exp_p += i;
1713
1714 PDEBUG("exp_len after computation: %08x\n", exp_len);
1715 tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len;
1716 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1717 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1718 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1719
1720 vud_len = 2 + mod_len;
1721 memset(z90cMsg_p, 0, tmp_size);
1722
1723 temp = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1724 memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr));
1725 tp6Hdr_p = (struct type6_hdr *)temp;
1726 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1727 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1728 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1729 sizeof(static_PKE_function_code));
1730 temp += sizeof(struct type6_hdr);
1731 memcpy(temp, &static_cprb, sizeof(struct CPRB));
1732 cprb_p = (struct CPRB *) temp;
1733 cprb_p->usage_domain[0]= (unsigned char)cdx;
1734 itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml);
1735 temp += sizeof(struct CPRB);
1736 memcpy(temp, &static_pke_function_and_rules,
1737 sizeof(struct function_and_rules_block));
1738 temp += sizeof(struct function_and_rules_block);
1739 temp += 2;
1740 if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) {
1741 kfree(temp_exp);
1742 return SEN_RELEASED;
1743 }
1744 if (is_empty(temp, mod_len)) {
1745 kfree(temp_exp);
1746 return SEN_USER_ERROR;
1747 }
1748 if ((temp[0] != 0x00) || (temp[1] != 0x02)) {
1749 kfree(temp_exp);
1750 return SEN_NOT_AVAIL;
1751 }
1752 for (i = 2; i < mod_len; i++)
1753 if (temp[i] == 0x00)
1754 break;
1755 if ((i < 9) || (i > (mod_len - 2))) {
1756 kfree(temp_exp);
1757 return SEN_NOT_AVAIL;
1758 }
1759 pad_len = i + 1;
1760 vud_len = mod_len - pad_len;
1761 memmove(temp, temp+pad_len, vud_len);
1762 temp -= 2;
1763 vud_len += 2;
1764 itoLe2(&vud_len, temp);
1765 temp += (vud_len);
1766 keyb_p = (struct T6_keyBlock_hdr *)temp;
1767 temp += sizeof(struct T6_keyBlock_hdr);
1768 memcpy(temp, &static_public_key, sizeof(static_public_key));
1769 key_p = (struct cca_public_key *)temp;
1770 temp = key_p->pubSec.exponent;
1771 memcpy(temp, exp_p, exp_len);
1772 kfree(temp_exp);
1773 temp += exp_len;
1774 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1775 return SEN_RELEASED;
1776 if (is_empty(temp, mod_len))
1777 return SEN_USER_ERROR;
1778 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1779 key_p->pubSec.modulus_byte_len = mod_len;
1780 key_p->pubSec.exponent_len = exp_len;
1781 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1782 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1783 key_p->pubHdr.token_length = key_len;
1784 key_len += 4;
1785 itoLe2(&key_len, keyb_p->ulen);
1786 key_len += 2;
1787 itoLe2(&key_len, keyb_p->blen);
1788 parmBlock_l -= pad_len;
1789 itoLe2(&parmBlock_l, cprb_p->req_parml);
1790 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1791
1792 return 0;
1793}
1794
1795static int
1796ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
1797 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p)
1798{
1799 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
1800 int long_len, pad_len, keyPartsLen, tmp_l;
1801 unsigned char *tgt_p, *temp;
1802 struct type6_hdr *tp6Hdr_p;
1803 struct CPRB *cprb_p;
1804 struct cca_token_hdr *keyHdr_p;
1805 struct cca_pvt_ext_CRT_sec *pvtSec_p;
1806 struct cca_public_sec *pubSec_p;
1807
1808 mod_len = icaMsg_p->inputdatalength;
1809 short_len = mod_len / 2;
1810 long_len = 8 + short_len;
1811 keyPartsLen = 3 * long_len + 2 * short_len;
1812 pad_len = (8 - (keyPartsLen % 8)) % 8;
1813 keyPartsLen += pad_len + mod_len;
1814 tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len;
1815 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1816 parmBlock_l = total_CPRB_len - sizeof(struct CPRB);
1817 vud_len = 2 + mod_len;
1818 tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER;
1819
1820 memset(z90cMsg_p, 0, tmp_size);
1821 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1822 memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr));
1823 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1824 tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4);
1825 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE;
1826 tgt_p += sizeof(struct type6_hdr);
1827 cprb_p = (struct CPRB *) tgt_p;
1828 memcpy(tgt_p, &static_cprb, sizeof(struct CPRB));
1829 cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3);
1830 itoLe2(&parmBlock_l, cprb_p->req_parml);
1831 memcpy(cprb_p->rpl_parml, cprb_p->req_parml,
1832 sizeof(cprb_p->req_parml));
1833 tgt_p += sizeof(struct CPRB);
1834 memcpy(tgt_p, &static_pkd_function_and_rules,
1835 sizeof(struct function_and_rules_block));
1836 tgt_p += sizeof(struct function_and_rules_block);
1837 itoLe2(&vud_len, tgt_p);
1838 tgt_p += 2;
1839 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
1840 return SEN_RELEASED;
1841 if (is_empty(tgt_p, mod_len))
1842 return SEN_USER_ERROR;
1843 tgt_p += mod_len;
1844 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
1845 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
1846 itoLe2(&tmp_l, tgt_p);
1847 temp = tgt_p + 2;
1848 tmp_l -= 2;
1849 itoLe2(&tmp_l, temp);
1850 tgt_p += sizeof(struct T6_keyBlock_hdr);
1851 keyHdr_p = (struct cca_token_hdr *)tgt_p;
1852 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
1853 tmp_l -= 4;
1854 keyHdr_p->token_length = tmp_l;
1855 tgt_p += sizeof(struct cca_token_hdr);
1856 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
1857 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
1858 pvtSec_p->section_length =
1859 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
1860 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
1861 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
1862 pvtSec_p->p_len = long_len;
1863 pvtSec_p->q_len = short_len;
1864 pvtSec_p->dp_len = long_len;
1865 pvtSec_p->dq_len = short_len;
1866 pvtSec_p->u_len = long_len;
1867 pvtSec_p->mod_len = mod_len;
1868 pvtSec_p->pad_len = pad_len;
1869 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
1870 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
1871 return SEN_RELEASED;
1872 if (is_empty(tgt_p, long_len))
1873 return SEN_USER_ERROR;
1874 tgt_p += long_len;
1875 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
1876 return SEN_RELEASED;
1877 if (is_empty(tgt_p, short_len))
1878 return SEN_USER_ERROR;
1879 tgt_p += short_len;
1880 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
1881 return SEN_RELEASED;
1882 if (is_empty(tgt_p, long_len))
1883 return SEN_USER_ERROR;
1884 tgt_p += long_len;
1885 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
1886 return SEN_RELEASED;
1887 if (is_empty(tgt_p, short_len))
1888 return SEN_USER_ERROR;
1889 tgt_p += short_len;
1890 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
1891 return SEN_RELEASED;
1892 if (is_empty(tgt_p, long_len))
1893 return SEN_USER_ERROR;
1894 tgt_p += long_len;
1895 tgt_p += pad_len;
1896 memset(tgt_p, 0xFF, mod_len);
1897 tgt_p += mod_len;
1898 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
1899 pubSec_p = (struct cca_public_sec *) tgt_p;
1900 pubSec_p->modulus_bit_len = 8 * mod_len;
1901 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
1902
1903 return 0;
1904}
1905
1906static int
1907ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx,
1908 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
1909 int dev_type)
1910{
1911 int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l;
1912 int key_len, i;
1913 unsigned char *temp_exp, *tgt_p, *temp, *exp_p;
1914 struct type6_hdr *tp6Hdr_p;
1915 struct CPRBX *cprbx_p;
1916 struct cca_public_key *key_p;
1917 struct T6_keyBlock_hdrX *keyb_p;
1918
1919 temp_exp = kmalloc(256, GFP_KERNEL);
1920 if (!temp_exp)
1921 return EGETBUFF;
1922 mod_len = icaMsg_p->inputdatalength;
1923 if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) {
1924 kfree(temp_exp);
1925 return SEN_RELEASED;
1926 }
1927 if (is_empty(temp_exp, mod_len)) {
1928 kfree(temp_exp);
1929 return SEN_USER_ERROR;
1930 }
1931 exp_p = temp_exp;
1932 for (i = 0; i < mod_len; i++)
1933 if (exp_p[i])
1934 break;
1935 if (i >= mod_len) {
1936 kfree(temp_exp);
1937 return SEN_USER_ERROR;
1938 }
1939 exp_len = mod_len - i;
1940 exp_p += i;
1941 PDEBUG("exp_len after computation: %08x\n", exp_len);
1942 tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len;
1943 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
1944 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
1945 tmp_size = tmp_size + CALLER_HEADER;
1946 vud_len = 2 + mod_len;
1947 memset(z90cMsg_p, 0, tmp_size);
1948 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
1949 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
1950 tp6Hdr_p = (struct type6_hdr *)tgt_p;
1951 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
1952 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
1953 memcpy(tp6Hdr_p->function_code, static_PKE_function_code,
1954 sizeof(static_PKE_function_code));
1955 tgt_p += sizeof(struct type6_hdr);
1956 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
1957 cprbx_p = (struct CPRBX *) tgt_p;
1958 cprbx_p->domain = (unsigned short)cdx;
1959 cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE;
1960 tgt_p += sizeof(struct CPRBX);
1961 if (dev_type == PCIXCC_MCL2)
1962 memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2,
1963 sizeof(struct function_and_rules_block));
1964 else
1965 memcpy(tgt_p, &static_pke_function_and_rulesX,
1966 sizeof(struct function_and_rules_block));
1967 tgt_p += sizeof(struct function_and_rules_block);
1968
1969 tgt_p += 2;
1970 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) {
1971 kfree(temp_exp);
1972 return SEN_RELEASED;
1973 }
1974 if (is_empty(tgt_p, mod_len)) {
1975 kfree(temp_exp);
1976 return SEN_USER_ERROR;
1977 }
1978 tgt_p -= 2;
1979 *((short *)tgt_p) = (short) vud_len;
1980 tgt_p += vud_len;
1981 keyb_p = (struct T6_keyBlock_hdrX *)tgt_p;
1982 tgt_p += sizeof(struct T6_keyBlock_hdrX);
1983 memcpy(tgt_p, &static_public_key, sizeof(static_public_key));
1984 key_p = (struct cca_public_key *)tgt_p;
1985 temp = key_p->pubSec.exponent;
1986 memcpy(temp, exp_p, exp_len);
1987 kfree(temp_exp);
1988 temp += exp_len;
1989 if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len))
1990 return SEN_RELEASED;
1991 if (is_empty(temp, mod_len))
1992 return SEN_USER_ERROR;
1993 key_p->pubSec.modulus_bit_len = 8 * mod_len;
1994 key_p->pubSec.modulus_byte_len = mod_len;
1995 key_p->pubSec.exponent_len = exp_len;
1996 key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len;
1997 key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr);
1998 key_p->pubHdr.token_length = key_len;
1999 key_len += 4;
2000 keyb_p->ulen = (unsigned short)key_len;
2001 key_len += 2;
2002 keyb_p->blen = (unsigned short)key_len;
2003 cprbx_p->req_parml = parmBlock_l;
2004 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2005
2006 return 0;
2007}
2008
2009static int
2010ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
2011 int *z90cMsg_l_p, struct type6_msg *z90cMsg_p,
2012 int dev_type)
2013{
2014 int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len;
2015 int long_len, pad_len, keyPartsLen, tmp_l;
2016 unsigned char *tgt_p, *temp;
2017 struct type6_hdr *tp6Hdr_p;
2018 struct CPRBX *cprbx_p;
2019 struct cca_token_hdr *keyHdr_p;
2020 struct cca_pvt_ext_CRT_sec *pvtSec_p;
2021 struct cca_public_sec *pubSec_p;
2022
2023 mod_len = icaMsg_p->inputdatalength;
2024 short_len = mod_len / 2;
2025 long_len = 8 + short_len;
2026 keyPartsLen = 3 * long_len + 2 * short_len;
2027 pad_len = (8 - (keyPartsLen % 8)) % 8;
2028 keyPartsLen += pad_len + mod_len;
2029 tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len;
2030 total_CPRB_len = tmp_size - sizeof(struct type6_hdr);
2031 parmBlock_l = total_CPRB_len - sizeof(struct CPRBX);
2032 vud_len = 2 + mod_len;
2033 tmp_size = tmp_size + CALLER_HEADER;
2034 memset(z90cMsg_p, 0, tmp_size);
2035 tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER;
2036 memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr));
2037 tp6Hdr_p = (struct type6_hdr *)tgt_p;
2038 tp6Hdr_p->ToCardLen1 = total_CPRB_len;
2039 tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE;
2040 tgt_p += sizeof(struct type6_hdr);
2041 cprbx_p = (struct CPRBX *) tgt_p;
2042 memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX));
2043 cprbx_p->domain = (unsigned short)cdx;
2044 cprbx_p->req_parml = parmBlock_l;
2045 cprbx_p->rpl_msgbl = parmBlock_l;
2046 tgt_p += sizeof(struct CPRBX);
2047 if (dev_type == PCIXCC_MCL2)
2048 memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2,
2049 sizeof(struct function_and_rules_block));
2050 else
2051 memcpy(tgt_p, &static_pkd_function_and_rulesX,
2052 sizeof(struct function_and_rules_block));
2053 tgt_p += sizeof(struct function_and_rules_block);
2054 *((short *)tgt_p) = (short) vud_len;
2055 tgt_p += 2;
2056 if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len))
2057 return SEN_RELEASED;
2058 if (is_empty(tgt_p, mod_len))
2059 return SEN_USER_ERROR;
2060 tgt_p += mod_len;
2061 tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) +
2062 sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen;
2063 *((short *)tgt_p) = (short) tmp_l;
2064 temp = tgt_p + 2;
2065 tmp_l -= 2;
2066 *((short *)temp) = (short) tmp_l;
2067 tgt_p += sizeof(struct T6_keyBlock_hdr);
2068 keyHdr_p = (struct cca_token_hdr *)tgt_p;
2069 keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT;
2070 tmp_l -= 4;
2071 keyHdr_p->token_length = tmp_l;
2072 tgt_p += sizeof(struct cca_token_hdr);
2073 pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p;
2074 pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
2075 pvtSec_p->section_length =
2076 sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen;
2077 pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
2078 pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL;
2079 pvtSec_p->p_len = long_len;
2080 pvtSec_p->q_len = short_len;
2081 pvtSec_p->dp_len = long_len;
2082 pvtSec_p->dq_len = short_len;
2083 pvtSec_p->u_len = long_len;
2084 pvtSec_p->mod_len = mod_len;
2085 pvtSec_p->pad_len = pad_len;
2086 tgt_p += sizeof(struct cca_pvt_ext_CRT_sec);
2087 if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len))
2088 return SEN_RELEASED;
2089 if (is_empty(tgt_p, long_len))
2090 return SEN_USER_ERROR;
2091 tgt_p += long_len;
2092 if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len))
2093 return SEN_RELEASED;
2094 if (is_empty(tgt_p, short_len))
2095 return SEN_USER_ERROR;
2096 tgt_p += short_len;
2097 if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len))
2098 return SEN_RELEASED;
2099 if (is_empty(tgt_p, long_len))
2100 return SEN_USER_ERROR;
2101 tgt_p += long_len;
2102 if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len))
2103 return SEN_RELEASED;
2104 if (is_empty(tgt_p, short_len))
2105 return SEN_USER_ERROR;
2106 tgt_p += short_len;
2107 if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len))
2108 return SEN_RELEASED;
2109 if (is_empty(tgt_p, long_len))
2110 return SEN_USER_ERROR;
2111 tgt_p += long_len;
2112 tgt_p += pad_len;
2113 memset(tgt_p, 0xFF, mod_len);
2114 tgt_p += mod_len;
2115 memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec));
2116 pubSec_p = (struct cca_public_sec *) tgt_p;
2117 pubSec_p->modulus_bit_len = 8 * mod_len;
2118 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2119
2120 return 0;
2121}
2122
2123static int
2124ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
2125 union type50_msg *z90cMsg_p)
2126{
2127 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
2128 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
2129 union type50_msg *tmp_type50_msg;
2130
2131 mod_len = icaMex_p->inputdatalength;
2132
2133 msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
2134 CALLER_HEADER;
2135
2136 memset(z90cMsg_p, 0, msg_size);
2137
2138 tmp_type50_msg = (union type50_msg *)
2139 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2140
2141 tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
2142
2143 if (mod_len <= 128) {
2144 tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
2145 tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
2146 mod_tgt = tmp_type50_msg->meb1.modulus;
2147 mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
2148 exp_tgt = tmp_type50_msg->meb1.exponent;
2149 exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
2150 inp_tgt = tmp_type50_msg->meb1.message;
2151 inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
2152 } else {
2153 tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
2154 tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
2155 mod_tgt = tmp_type50_msg->meb2.modulus;
2156 mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
2157 exp_tgt = tmp_type50_msg->meb2.exponent;
2158 exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
2159 inp_tgt = tmp_type50_msg->meb2.message;
2160 inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
2161 }
2162
2163 mod_tgt += (mod_tgt_len - mod_len);
2164 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
2165 return SEN_RELEASED;
2166 if (is_empty(mod_tgt, mod_len))
2167 return SEN_USER_ERROR;
2168 exp_tgt += (exp_tgt_len - mod_len);
2169 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
2170 return SEN_RELEASED;
2171 if (is_empty(exp_tgt, mod_len))
2172 return SEN_USER_ERROR;
2173 inp_tgt += (inp_tgt_len - mod_len);
2174 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
2175 return SEN_RELEASED;
2176 if (is_empty(inp_tgt, mod_len))
2177 return SEN_USER_ERROR;
2178
2179 *z90cMsg_l_p = msg_size - CALLER_HEADER;
2180
2181 return 0;
2182}
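/**
 * Note: in both type 50 builders the operands are right-justified: each
 * target pointer is advanced by (field size - operand length) before the
 * copy, so shorter values land in the low-order bytes of the fixed fields.
 */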
2183
2184static int
2185ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
2186 int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
2187{
2188 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
2189 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
2190 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
2191 temp[8];
2192 union type50_msg *tmp_type50_msg;
2193
2194 mod_len = icaMsg_p->inputdatalength;
2195 short_len = mod_len / 2;
2196 long_len = mod_len / 2 + 8;
2197 long_offset = 0;
2198
2199 if (long_len > 128) {
2200 memset(temp, 0x00, sizeof(temp));
2201 if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
2202 return SEN_RELEASED;
2203 if (!is_empty(temp, 8))
2204 return SEN_NOT_AVAIL;
2205 if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
2206 return SEN_RELEASED;
2207 if (!is_empty(temp, 8))
2208 return SEN_NOT_AVAIL;
2209 if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
2210 return SEN_RELEASED;
2211 if (!is_empty(temp, 8))
2212 return SEN_NOT_AVAIL;
2213 long_offset = long_len - 128;
2214 long_len = 128;
2215 }
2216
2217 tmp_size = ((long_len <= 64) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
2218 CALLER_HEADER;
2219
2220 memset(z90cMsg_p, 0, tmp_size);
2221
2222 tmp_type50_msg = (union type50_msg *)
2223 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2224
2225 tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
2226 if (long_len <= 64) {
2227 tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
2228 tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
2229 p_tgt = tmp_type50_msg->crb1.p;
2230 p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
2231 q_tgt = tmp_type50_msg->crb1.q;
2232 q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
2233 dp_tgt = tmp_type50_msg->crb1.dp;
2234 dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
2235 dq_tgt = tmp_type50_msg->crb1.dq;
2236 dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
2237 u_tgt = tmp_type50_msg->crb1.u;
2238 u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
2239 inp_tgt = tmp_type50_msg->crb1.message;
2240 inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
2241 } else {
2242 tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
2243 tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
2244 p_tgt = tmp_type50_msg->crb2.p;
2245 p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
2246 q_tgt = tmp_type50_msg->crb2.q;
2247 q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
2248 dp_tgt = tmp_type50_msg->crb2.dp;
2249 dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
2250 dq_tgt = tmp_type50_msg->crb2.dq;
2251 dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
2252 u_tgt = tmp_type50_msg->crb2.u;
2253 u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
2254 inp_tgt = tmp_type50_msg->crb2.message;
2255 inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
2256 }
2257
2258 p_tgt += (p_tgt_len - long_len);
2259 if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
2260 return SEN_RELEASED;
2261 if (is_empty(p_tgt, long_len))
2262 return SEN_USER_ERROR;
2263 q_tgt += (q_tgt_len - short_len);
2264 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
2265 return SEN_RELEASED;
2266 if (is_empty(q_tgt, short_len))
2267 return SEN_USER_ERROR;
2268 dp_tgt += (dp_tgt_len - long_len);
2269 if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
2270 return SEN_RELEASED;
2271 if (is_empty(dp_tgt, long_len))
2272 return SEN_USER_ERROR;
2273 dq_tgt += (dq_tgt_len - short_len);
2274 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
2275 return SEN_RELEASED;
2276 if (is_empty(dq_tgt, short_len))
2277 return SEN_USER_ERROR;
2278 u_tgt += (u_tgt_len - long_len);
2279 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
2280 return SEN_RELEASED;
2281 if (is_empty(u_tgt, long_len))
2282 return SEN_USER_ERROR;
2283 inp_tgt += (inp_tgt_len - mod_len);
2284 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
2285 return SEN_RELEASED;
2286 if (is_empty(inp_tgt, mod_len))
2287 return SEN_USER_ERROR;
2288
2289 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2290
2291 return 0;
2292}
2293
2294int
2295convert_request(unsigned char *buffer, int func, unsigned short function,
2296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
2297{
2298 if (dev_type == PCICA) {
2299 if (func == ICARSACRT)
2300 return ICACRT_msg_to_type4CRT_msg(
2301 (struct ica_rsa_modexpo_crt *) buffer,
2302 msg_l_p, (union type4_msg *) msg_p);
2303 else
2304 return ICAMEX_msg_to_type4MEX_msg(
2305 (struct ica_rsa_modexpo *) buffer,
2306 msg_l_p, (union type4_msg *) msg_p);
2307 }
2308 if (dev_type == PCICC) {
2309 if (func == ICARSACRT)
2310 return ICACRT_msg_to_type6CRT_msg(
2311 (struct ica_rsa_modexpo_crt *) buffer,
2312 cdx, msg_l_p, (struct type6_msg *)msg_p);
2313 if (function == PCI_FUNC_KEY_ENCRYPT)
2314 return ICAMEX_msg_to_type6MEX_en_msg(
2315 (struct ica_rsa_modexpo *) buffer,
2316 cdx, msg_l_p, (struct type6_msg *) msg_p);
2317 else
2318 return ICAMEX_msg_to_type6MEX_de_msg(
2319 (struct ica_rsa_modexpo *) buffer,
2320 cdx, msg_l_p, (struct type6_msg *) msg_p);
2321 }
2322 if ((dev_type == PCIXCC_MCL2) ||
2323 (dev_type == PCIXCC_MCL3) ||
2324 (dev_type == CEX2C)) {
2325 if (func == ICARSACRT)
2326 return ICACRT_msg_to_type6CRT_msgX(
2327 (struct ica_rsa_modexpo_crt *) buffer,
2328 cdx, msg_l_p, (struct type6_msg *) msg_p,
2329 dev_type);
2330 else
2331 return ICAMEX_msg_to_type6MEX_msgX(
2332 (struct ica_rsa_modexpo *) buffer,
2333 cdx, msg_l_p, (struct type6_msg *) msg_p,
2334 dev_type);
2335 }
2336 if (dev_type == CEX2A) {
2337 if (func == ICARSACRT)
2338 return ICACRT_msg_to_type50CRT_msg(
2339 (struct ica_rsa_modexpo_crt *) buffer,
2340 msg_l_p, (union type50_msg *) msg_p);
2341 else
2342 return ICAMEX_msg_to_type50MEX_msg(
2343 (struct ica_rsa_modexpo *) buffer,
2344 msg_l_p, (union type50_msg *) msg_p);
2345 }
2346
2347 return 0;
2348}
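/**
 * Note: convert_request selects the on-the-wire format from the device
 * type: type 4 for PCICA, type 6 with a CPRB for PCICC, type 6 with a
 * CPRBX for PCIXCC_MCL2/MCL3 and CEX2C, and type 50 for CEX2A.
 */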
2349
2350int ext_bitlens_msg_count = 0;
2351static inline void
2352unset_ext_bitlens(void)
2353{
2354 if (!ext_bitlens_msg_count) {
2355 PRINTK("Unable to use coprocessors for extended bitlengths. "
2356 "Using PCICAs/CEX2As (if present) for extended "
2357 "bitlengths. This is not an error.\n");
2358 ext_bitlens_msg_count++;
2359 }
2360 ext_bitlens = 0;
2361}
2362
2363int
2364convert_response(unsigned char *response, unsigned char *buffer,
2365 int *respbufflen_p, unsigned char *resp_buff)
2366{
2367 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2368 struct error_hdr *errh_p = (struct error_hdr *) response;
2369 struct type80_hdr *t80h_p = (struct type80_hdr *) response;
2370 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2371 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2372 int reply_code, service_rc, service_rs, src_l;
2373 unsigned char *src_p, *tgt_p;
2374 struct CPRB *cprb_p;
2375 struct CPRBX *cprbx_p;
2376
2377 src_p = 0;
2378 reply_code = 0;
2379 service_rc = 0;
2380 service_rs = 0;
2381 src_l = 0;
2382 switch (errh_p->type) {
2383 case TYPE82_RSP_CODE:
2384 case TYPE88_RSP_CODE:
2385 reply_code = errh_p->reply_code;
2386 src_p = (unsigned char *)errh_p;
2387 PRINTK("Hardware error: Type %02X Message Header: "
2388 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2389 errh_p->type,
2390 src_p[0], src_p[1], src_p[2], src_p[3],
2391 src_p[4], src_p[5], src_p[6], src_p[7]);
2392 break;
2393 case TYPE80_RSP_CODE:
2394 src_l = icaMsg_p->outputdatalength;
2395 src_p = response + (int)t80h_p->len - src_l;
2396 break;
2397 case TYPE84_RSP_CODE:
2398 src_l = icaMsg_p->outputdatalength;
2399 src_p = response + (int)t84h_p->len - src_l;
2400 break;
2401 case TYPE86_RSP_CODE:
2402 reply_code = t86m_p->header.reply_code;
2403 if (reply_code != 0)
2404 break;
2405 cprb_p = (struct CPRB *)
2406 (response + sizeof(struct type86_fmt2_msg));
2407 cprbx_p = (struct CPRBX *) cprb_p;
2408 if (cprb_p->cprb_ver_id != 0x02) {
2409 le2toI(cprb_p->ccp_rtcode, &service_rc);
2410 if (service_rc != 0) {
2411 le2toI(cprb_p->ccp_rscode, &service_rs);
2412 if ((service_rc == 8) && (service_rs == 66))
2413 PDEBUG("Bad block format on PCICC\n");
2414 else if ((service_rc == 8) && (service_rs == 65))
2415 PDEBUG("Probably an even modulus on "
2416 "PCICC\n");
2417 else if ((service_rc == 8) && (service_rs == 770)) {
2418 PDEBUG("Invalid key length on PCICC\n");
2419 unset_ext_bitlens();
2420 return REC_USE_PCICA;
2421 }
2422 else if ((service_rc == 8) && (service_rs == 783)) {
 2423					PDEBUG("Extended bitlengths not enabled "
 2424					       "on PCICC\n");
2425 unset_ext_bitlens();
2426 return REC_USE_PCICA;
2427 }
2428 else
2429 PRINTK("service rc/rs (PCICC): %d/%d\n",
2430 service_rc, service_rs);
2431 return REC_OPERAND_INV;
2432 }
2433 src_p = (unsigned char *)cprb_p + sizeof(struct CPRB);
2434 src_p += 4;
2435 le2toI(src_p, &src_l);
2436 src_l -= 2;
2437 src_p += 2;
2438 } else {
2439 service_rc = (int)cprbx_p->ccp_rtcode;
2440 if (service_rc != 0) {
2441 service_rs = (int) cprbx_p->ccp_rscode;
2442 if ((service_rc == 8) && (service_rs == 66))
2443 PDEBUG("Bad block format on PCIXCC\n");
2444 else if ((service_rc == 8) && (service_rs == 65))
2445 PDEBUG("Probably an even modulus on "
2446 "PCIXCC\n");
2447 else if ((service_rc == 8) && (service_rs == 770)) {
2448 PDEBUG("Invalid key length on PCIXCC\n");
2449 unset_ext_bitlens();
2450 return REC_USE_PCICA;
2451 }
2452 else if ((service_rc == 8) && (service_rs == 783)) {
 2453					PDEBUG("Extended bitlengths not enabled "
 2454					       "on PCIXCC\n");
2455 unset_ext_bitlens();
2456 return REC_USE_PCICA;
2457 }
2458 else
2459 PRINTK("service rc/rs (PCIXCC): %d/%d\n",
2460 service_rc, service_rs);
2461 return REC_OPERAND_INV;
2462 }
2463 src_p = (unsigned char *)
2464 cprbx_p + sizeof(struct CPRBX);
2465 src_p += 4;
2466 src_l = (int)(*((short *) src_p));
2467 src_l -= 2;
2468 src_p += 2;
2469 }
2470 break;
2471 default:
2472 src_p = (unsigned char *)errh_p;
2473 PRINTK("Unrecognized Message Header: "
2474 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
2475 src_p[0], src_p[1], src_p[2], src_p[3],
2476 src_p[4], src_p[5], src_p[6], src_p[7]);
2477 return REC_BAD_MESSAGE;
2478 }
2479
2480 if (reply_code)
2481 switch (reply_code) {
2482 case REP82_ERROR_MACHINE_FAILURE:
2483 if (errh_p->type == TYPE82_RSP_CODE)
2484 PRINTKW("Machine check failure\n");
2485 else
2486 PRINTKW("Module failure\n");
2487 return REC_HARDWAR_ERR;
2488 case REP82_ERROR_OPERAND_INVALID:
2489 return REC_OPERAND_INV;
2490 case REP88_ERROR_MESSAGE_MALFORMD:
2491 PRINTKW("Message malformed\n");
2492 return REC_OPERAND_INV;
2493 case REP82_ERROR_OPERAND_SIZE:
2494 return REC_OPERAND_SIZE;
2495 case REP82_ERROR_EVEN_MOD_IN_OPND:
2496 return REC_EVEN_MOD;
2497 case REP82_ERROR_MESSAGE_TYPE:
2498 return WRONG_DEVICE_TYPE;
2499 case REP82_ERROR_TRANSPORT_FAIL:
2500 PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n",
2501 t86m_p->apfs[0], t86m_p->apfs[1],
2502 t86m_p->apfs[2], t86m_p->apfs[3]);
2503 return REC_HARDWAR_ERR;
2504 default:
2505 PRINTKW("reply code = %d\n", reply_code);
2506 return REC_HARDWAR_ERR;
2507 }
2508
2509 if (service_rc != 0)
2510 return REC_OPERAND_INV;
2511
2512 if ((src_l > icaMsg_p->outputdatalength) ||
2513 (src_l > RESPBUFFSIZE) ||
2514 (src_l <= 0))
2515 return REC_OPERAND_SIZE;
2516
2517 PDEBUG("Length returned = %d\n", src_l);
2518 tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l;
2519 memcpy(tgt_p, src_p, src_l);
2520 if ((errh_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) {
2521 memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l);
2522 if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l))
2523 return REC_INVALID_PAD;
2524 }
2525 *respbufflen_p = icaMsg_p->outputdatalength;
2526 if (*respbufflen_p == 0)
2527 PRINTK("Zero *respbufflen_p\n");
2528
2529 return 0;
2530}
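/**
 * Note: convert_response distinguishes type 82/88 error headers, raw
 * type 80/84 results, and type 86 replies whose CPRB/CPRBX carries
 * service rc/rs codes; the usable result is copied right-aligned into
 * resp_buff and, for type 86 replies, re-padded via pad_msg() when it is
 * shorter than outputdatalength.
 */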
2531
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
deleted file mode 100644
index b2f20ab8431a..000000000000
--- a/drivers/s390/crypto/z90main.c
+++ /dev/null
@@ -1,3379 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/miscdevice.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/proc_fs.h>
37#include <linux/syscalls.h>
38#include "z90crypt.h"
39#include "z90common.h"
40
41/**
42 * Defaults that may be modified.
43 */
44
45/**
46 * You can specify a different minor at compile time.
47 */
48#ifndef Z90CRYPT_MINOR
49#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
50#endif
51
52/**
53 * You can specify a different domain at compile time or on the insmod
54 * command line.
55 */
56#ifndef DOMAIN_INDEX
57#define DOMAIN_INDEX -1
58#endif
59
60/**
61 * This is the name under which the device is registered in /proc/modules.
62 */
63#define REG_NAME "z90crypt"
64
65/**
66 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
 67 * older than CLEANUPTIME seconds.
68 */
69#ifndef CLEANUPTIME
70#define CLEANUPTIME 15
71#endif
72
73/**
74 * Config should run every CONFIGTIME seconds
75 */
76#ifndef CONFIGTIME
77#define CONFIGTIME 30
78#endif
79
80/**
81 * The first execution of the config task should take place
82 * immediately after initialization
83 */
84#ifndef INITIAL_CONFIGTIME
85#define INITIAL_CONFIGTIME 1
86#endif
87
88/**
89 * Reader should run every READERTIME milliseconds
90 * With the 100Hz patch for s390, z90crypt can lock the system solid while
91 * under heavy load. We'll try to avoid that.
92 */
93#ifndef READERTIME
94#if HZ > 1000
95#define READERTIME 2
96#else
97#define READERTIME 10
98#endif
99#endif
100
101/**
102 * turn long device array index into device pointer
103 */
104#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
105
106/**
107 * turn short device array index into long device array index
108 */
109#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
110
111/**
112 * turn short device array index into device pointer
113 */
114#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
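/**
 * Example: if AP 9 is the second installed device, device_index[1] is 9,
 * so SHRT2LONG(1) yields 9 and SHRT2DEVPTR(1) resolves to
 * z90crypt.device_p[9].
 */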
115
116/**
117 * Status for a work-element
118 */
119#define STAT_DEFAULT 0x00 // request has not been processed
120
121#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
122 // else, device is determined each write
123#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
124 // before being sent to the hardware.
125#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
126// 0x20 // UNUSED state
127#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
128#define STAT_NOWORK 0x00 // bits off: no work on any queue
129#define STAT_RDWRMASK 0x30 // mask for bits 5-4
130
131/**
132 * Macros to check the status RDWRMASK
133 */
134#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
135#define SET_RDWRMASK(statbyte, newval) \
136 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
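/**
 * Example: SET_RDWRMASK(statbyte, STAT_READPEND) clears bits 5-4 and then
 * marks the element as having response data ready; a subsequent
 * CHK_RDWRMASK(statbyte) returns STAT_READPEND.
 */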
137
138/**
139 * Audit Trail. Progress of a Work element
140 * audit[0]: Unless noted otherwise, these bits are all set by the process
141 */
142#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
143#define FP_BUFFREQ 0x40 // Low Level buffer requested
144#define FP_BUFFGOT 0x20 // Low Level buffer obtained
145#define FP_SENT 0x10 // Work element sent to a crypto device
146 // (may be set by process or by reader task)
147#define FP_PENDING 0x08 // Work element placed on pending queue
148 // (may be set by process or by reader task)
149#define FP_REQUEST 0x04 // Work element placed on request queue
150#define FP_ASLEEP 0x02 // Work element about to sleep
151#define FP_AWAKE 0x01 // Work element has been awakened
152
153/**
154 * audit[1]: These bits are set by the reader task and/or the cleanup task
155 */
156#define FP_NOTPENDING 0x80 // Work element removed from pending queue
157#define FP_AWAKENING 0x40 // Caller about to be awakened
158#define FP_TIMEDOUT 0x20 // Caller timed out
159#define FP_RESPSIZESET 0x10 // Response size copied to work element
160#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
161#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
162#define FP_REMREQUEST 0x02 // Work element removed from request queue
163#define FP_SIGNALED 0x01 // Work element was awakened by a signal
164
165/**
166 * audit[2]: unused
167 */
168
169/**
170 * state of the file handle in private_data.status
171 */
172#define STAT_OPEN 0
173#define STAT_CLOSED 1
174
175/**
176 * PID() expands to the process ID of the current process
177 */
178#define PID() (current->pid)
179
180/**
181 * Selected Constants. The number of APs and the number of devices
182 */
183#ifndef Z90CRYPT_NUM_APS
184#define Z90CRYPT_NUM_APS 64
185#endif
186#ifndef Z90CRYPT_NUM_DEVS
187#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
188#endif
189
190/**
191 * Buffer size for receiving responses. The maximum Response Size
192 * is actually the maximum request size, since in an error condition
193 * the request itself may be returned unchanged.
194 */
195#define MAX_RESPONSE_SIZE 0x0000077C
196
197/**
198 * A count and status-byte mask
199 */
200struct status {
201 int st_count; // # of enabled devices
202 int disabled_count; // # of disabled devices
203 int user_disabled_count; // # of devices disabled via proc fs
204 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
205};
206
207/**
208 * The array of device indexes is a mechanism for fast indexing into
209 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
 210 * installed, device_index[0] is 3, device_index[1] is 9, and
 211 * device_index[2] is 47.
212 */
213struct device_x {
214 int device_index[Z90CRYPT_NUM_DEVS];
215};
216
217/**
218 * All devices are arranged in a single array: 64 APs
219 */
220struct device {
221 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
222 // PCIXCC_MCL3, CEX2C, CEX2A
223 enum devstat dev_stat; // current device status
224 int dev_self_x; // Index in array
225 int disabled; // Set when device is in error
226 int user_disabled; // Set when device is disabled by user
227 int dev_q_depth; // q depth
228 unsigned char * dev_resp_p; // Response buffer address
229 int dev_resp_l; // Response Buffer length
230 int dev_caller_count; // Number of callers
231 int dev_total_req_cnt; // # requests for device since load
232 struct list_head dev_caller_list; // List of callers
233};
234
235/**
236 * There's a struct status and a struct device_x for each device type.
237 */
238struct hdware_block {
239 struct status hdware_mask;
240 struct status type_mask[Z90CRYPT_NUM_TYPES];
241 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
242 unsigned char device_type_array[Z90CRYPT_NUM_APS];
243};
244
245/**
246 * z90crypt is the topmost data structure in the hierarchy.
247 */
248struct z90crypt {
249 int max_count; // Nr of possible crypto devices
250 struct status mask;
251 int q_depth_array[Z90CRYPT_NUM_DEVS];
252 int dev_type_array[Z90CRYPT_NUM_DEVS];
253 struct device_x overall_device_x; // array device indexes
254 struct device * device_p[Z90CRYPT_NUM_DEVS];
255 int terminating;
256 int domain_established;// TRUE: domain has been found
257 int cdx; // Crypto Domain Index
258 int len; // Length of this data structure
259 struct hdware_block *hdware_info;
260};
261
262/**
263 * An array of these structures is pointed to from dev_caller
264 * The length of the array depends on the device type. For APs,
265 * there are 8.
266 *
267 * The caller buffer is allocated to the user at OPEN. At WRITE,
268 * it contains the request; at READ, the response. The function
269 * send_to_crypto_device converts the request to device-dependent
 270 * form and uses the caller's OPEN-allocated buffer for the response.
271 *
 272 * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p,
 273 * which points to it), see the discussion in z90hardware.c.
274 * Search for "extended request message block".
275 */
276struct caller {
277 int caller_buf_l; // length of original request
278 unsigned char * caller_buf_p; // Original request on WRITE
279 int caller_dev_dep_req_l; // len device dependent request
280 unsigned char * caller_dev_dep_req_p; // Device dependent form
281 unsigned char caller_id[8]; // caller-supplied message id
282 struct list_head caller_liste;
283 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
284};
285
286/**
287 * Function prototypes from z90hardware.c
288 */
289enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
290 int *dev_type);
291enum devstat reset_device(int deviceNr, int cdx, int resetNr);
292enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
293enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
294 unsigned char *resp, unsigned char *psmid);
295int convert_request(unsigned char *buffer, int func, unsigned short function,
296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
297int convert_response(unsigned char *response, unsigned char *buffer,
298 int *respbufflen_p, unsigned char *resp_buff);
299
300/**
301 * Low level function prototypes
302 */
303static int create_z90crypt(int *cdx_p);
304static int refresh_z90crypt(int *cdx_p);
305static int find_crypto_devices(struct status *deviceMask);
306static int create_crypto_device(int index);
307static int destroy_crypto_device(int index);
308static void destroy_z90crypt(void);
309static int refresh_index_array(struct status *status_str,
310 struct device_x *index_array);
311static int probe_device_type(struct device *devPtr);
312static int probe_PCIXCC_type(struct device *devPtr);
313
314/**
315 * proc fs definitions
316 */
317static struct proc_dir_entry *z90crypt_entry;
318
319/**
320 * data structures
321 */
322
323/**
 324 * work_element.priv_data points back to this structure
325 */
326struct priv_data {
327 pid_t opener_pid;
328 unsigned char status; // 0: open 1: closed
329};
330
331/**
332 * A work element is allocated for each request
333 */
334struct work_element {
335 struct priv_data *priv_data;
336 pid_t pid;
337 int devindex; // index of device processing this w_e
338 // (If request did not specify device,
339 // -1 until placed onto a queue)
340 int devtype;
341 struct list_head liste; // used for requestq and pendingq
342 char buffer[128]; // local copy of user request
343 int buff_size; // size of the buffer for the request
344 char resp_buff[RESPBUFFSIZE];
345 int resp_buff_size;
346 char __user * resp_addr; // address of response in user space
347 unsigned int funccode; // function code of request
348 wait_queue_head_t waitq;
349 unsigned long requestsent; // time at which the request was sent
350 atomic_t alarmrung; // wake-up signal
351 unsigned char caller_id[8]; // pid + counter, for this w_e
352 unsigned char status[1]; // bits to mark status of the request
353 unsigned char audit[3]; // record of work element's progress
354 unsigned char * requestptr; // address of request buffer
355 int retcode; // return code of request
356};
357
358/**
359 * High level function prototypes
360 */
361static int z90crypt_open(struct inode *, struct file *);
362static int z90crypt_release(struct inode *, struct file *);
363static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
364static ssize_t z90crypt_write(struct file *, const char __user *,
365 size_t, loff_t *);
366static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
367static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
368
369static void z90crypt_reader_task(unsigned long);
370static void z90crypt_schedule_reader_task(unsigned long);
371static void z90crypt_config_task(unsigned long);
372static void z90crypt_cleanup_task(unsigned long);
373
374static int z90crypt_status(char *, char **, off_t, int, int *, void *);
375static int z90crypt_status_write(struct file *, const char __user *,
376 unsigned long, void *);
377
378/**
379 * Storage allocated at initialization and used throughout the life of
380 * this insmod
381 */
382static int domain = DOMAIN_INDEX;
383static struct z90crypt z90crypt;
384static int quiesce_z90crypt;
385static spinlock_t queuespinlock;
386static struct list_head request_list;
387static int requestq_count;
388static struct list_head pending_list;
389static int pendingq_count;
390
391static struct tasklet_struct reader_tasklet;
392static struct timer_list reader_timer;
393static struct timer_list config_timer;
394static struct timer_list cleanup_timer;
395static atomic_t total_open;
396static atomic_t z90crypt_step;
397
398static struct file_operations z90crypt_fops = {
399 .owner = THIS_MODULE,
400 .read = z90crypt_read,
401 .write = z90crypt_write,
402 .unlocked_ioctl = z90crypt_unlocked_ioctl,
403#ifdef CONFIG_COMPAT
404 .compat_ioctl = z90crypt_compat_ioctl,
405#endif
406 .open = z90crypt_open,
407 .release = z90crypt_release
408};
409
410static struct miscdevice z90crypt_misc_device = {
411 .minor = Z90CRYPT_MINOR,
412 .name = DEV_NAME,
413 .fops = &z90crypt_fops,
414};
415
416/**
417 * Documentation values.
418 */
 419MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman "
 420	      "and Jochen Roehrig");
421MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
422 "Copyright 2001, 2005 IBM Corporation");
423MODULE_LICENSE("GPL");
424module_param(domain, int, 0);
425MODULE_PARM_DESC(domain, "domain index for device");
426
427#ifdef CONFIG_COMPAT
428/**
429 * ioctl32 conversion routines
430 */
431struct ica_rsa_modexpo_32 { // For 32-bit callers
432 compat_uptr_t inputdata;
433 unsigned int inputdatalength;
434 compat_uptr_t outputdata;
435 unsigned int outputdatalength;
436 compat_uptr_t b_key;
437 compat_uptr_t n_modulus;
438};
439
440static long
441trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
442{
443 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
444 struct ica_rsa_modexpo_32 mex32k;
445 struct ica_rsa_modexpo __user *mex64;
446 long ret = 0;
447 unsigned int i;
448
449 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
450 return -EFAULT;
451 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
452 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
453 return -EFAULT;
454 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
455 return -EFAULT;
456 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
457 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
458 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
459 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
460 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
461 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
462 return -EFAULT;
463 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
464 if (!ret)
465 if (__get_user(i, &mex64->outputdatalength) ||
466 __put_user(i, &mex32u->outputdatalength))
467 ret = -EFAULT;
468 return ret;
469}
470
471struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
472 compat_uptr_t inputdata;
473 unsigned int inputdatalength;
474 compat_uptr_t outputdata;
475 unsigned int outputdatalength;
476 compat_uptr_t bp_key;
477 compat_uptr_t bq_key;
478 compat_uptr_t np_prime;
479 compat_uptr_t nq_prime;
480 compat_uptr_t u_mult_inv;
481};
482
483static long
484trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
485{
486 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
487 struct ica_rsa_modexpo_crt_32 crt32k;
488 struct ica_rsa_modexpo_crt __user *crt64;
489 long ret = 0;
490 unsigned int i;
491
492 if (!access_ok(VERIFY_WRITE, crt32u,
493 sizeof(struct ica_rsa_modexpo_crt_32)))
494 return -EFAULT;
495 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
496 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
497 return -EFAULT;
498 if (copy_from_user(&crt32k, crt32u,
499 sizeof(struct ica_rsa_modexpo_crt_32)))
500 return -EFAULT;
501 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
502 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
503 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
504 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
505 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
506 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
507 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
508 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
509 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
510 return -EFAULT;
511 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
512 if (!ret)
513 if (__get_user(i, &crt64->outputdatalength) ||
514 __put_user(i, &crt32u->outputdatalength))
515 ret = -EFAULT;
516 return ret;
517}
518
519static long
520z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
521{
522 switch (cmd) {
523 case ICAZ90STATUS:
524 case Z90QUIESCE:
525 case Z90STAT_TOTALCOUNT:
526 case Z90STAT_PCICACOUNT:
527 case Z90STAT_PCICCCOUNT:
528 case Z90STAT_PCIXCCCOUNT:
529 case Z90STAT_PCIXCCMCL2COUNT:
530 case Z90STAT_PCIXCCMCL3COUNT:
531 case Z90STAT_CEX2CCOUNT:
532 case Z90STAT_REQUESTQ_COUNT:
533 case Z90STAT_PENDINGQ_COUNT:
534 case Z90STAT_TOTALOPEN_COUNT:
535 case Z90STAT_DOMAIN_INDEX:
536 case Z90STAT_STATUS_MASK:
537 case Z90STAT_QDEPTH_MASK:
538 case Z90STAT_PERDEV_REQCNT:
539 return z90crypt_unlocked_ioctl(filp, cmd, arg);
540 case ICARSAMODEXPO:
541 return trans_modexpo32(filp, cmd, arg);
542 case ICARSACRT:
543 return trans_modexpo_crt32(filp, cmd, arg);
544 default:
545 return -ENOIOCTLCMD;
546 }
547}
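/**
 * Note: the status and quiesce ioctls above carry no embedded pointers and
 * are passed straight through to z90crypt_unlocked_ioctl; only
 * ICARSAMODEXPO and ICARSACRT need their 32-bit pointer fields repacked
 * into 64-bit user structures first.
 */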
548#endif
549
550/**
551 * The module initialization code.
552 */
553static int __init
554z90crypt_init_module(void)
555{
556 int result, nresult;
557 struct proc_dir_entry *entry;
558
559 PDEBUG("PID %d\n", PID());
560
561 if ((domain < -1) || (domain > 15)) {
562 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
563 return -EINVAL;
564 }
565
566 /* Register as misc device with given minor (or get a dynamic one). */
567 result = misc_register(&z90crypt_misc_device);
568 if (result < 0) {
569 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
570 z90crypt_misc_device.minor, result);
571 return result;
572 }
573
574 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
575
576 result = create_z90crypt(&domain);
577 if (result != 0) {
578 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
579 domain, result);
580 result = -ENOMEM;
581 goto init_module_cleanup;
582 }
583
584 if (result == 0) {
585 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
586 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
587 __DATE__, __TIME__);
588 PDEBUG("create_z90crypt (domain index %d) successful.\n",
589 domain);
590 } else
591 PRINTK("No devices at startup\n");
592
593 /* Initialize globals. */
594 spin_lock_init(&queuespinlock);
595
596 INIT_LIST_HEAD(&pending_list);
597 pendingq_count = 0;
598
599 INIT_LIST_HEAD(&request_list);
600 requestq_count = 0;
601
602 quiesce_z90crypt = 0;
603
604 atomic_set(&total_open, 0);
605 atomic_set(&z90crypt_step, 0);
606
607 /* Set up the cleanup task. */
608 init_timer(&cleanup_timer);
609 cleanup_timer.function = z90crypt_cleanup_task;
610 cleanup_timer.data = 0;
611 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
612 add_timer(&cleanup_timer);
613
614 /* Set up the proc file system */
615 entry = create_proc_entry("driver/z90crypt", 0644, 0);
616 if (entry) {
617 entry->nlink = 1;
618 entry->data = 0;
619 entry->read_proc = z90crypt_status;
620 entry->write_proc = z90crypt_status_write;
621 }
622 else
623 PRINTK("Couldn't create z90crypt proc entry\n");
624 z90crypt_entry = entry;
625
626 /* Set up the configuration task. */
627 init_timer(&config_timer);
628 config_timer.function = z90crypt_config_task;
629 config_timer.data = 0;
630 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
631 add_timer(&config_timer);
632
633 /* Set up the reader task */
634 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
635 init_timer(&reader_timer);
636 reader_timer.function = z90crypt_schedule_reader_task;
637 reader_timer.data = 0;
638 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
639 add_timer(&reader_timer);
640
641 return 0; // success
642
643init_module_cleanup:
644 if ((nresult = misc_deregister(&z90crypt_misc_device)))
645 PRINTK("misc_deregister failed with %d.\n", nresult);
646 else
647 PDEBUG("misc_deregister successful.\n");
648
649 return result; // failure
650}
651
652/**
653 * The module termination code
654 */
655static void __exit
656z90crypt_cleanup_module(void)
657{
658 int nresult;
659
660 PDEBUG("PID %d\n", PID());
661
662 remove_proc_entry("driver/z90crypt", 0);
663
664 if ((nresult = misc_deregister(&z90crypt_misc_device)))
665 PRINTK("misc_deregister failed with %d.\n", nresult);
666 else
667 PDEBUG("misc_deregister successful.\n");
668
669 /* Remove the tasks */
670 tasklet_kill(&reader_tasklet);
671 del_timer(&reader_timer);
672 del_timer(&config_timer);
673 del_timer(&cleanup_timer);
674
675 destroy_z90crypt();
676
677 PRINTKN("Unloaded.\n");
678}
679
680/**
681 * Functions running under a process id
682 *
683 * The I/O functions:
684 * z90crypt_open
685 * z90crypt_release
686 * z90crypt_read
687 * z90crypt_write
688 * z90crypt_unlocked_ioctl
689 * z90crypt_status
690 * z90crypt_status_write
691 * disable_card
692 * enable_card
693 *
694 * Helper functions:
695 * z90crypt_rsa
696 * z90crypt_prepare
697 * z90crypt_send
698 * z90crypt_process_results
699 *
700 */
701static int
702z90crypt_open(struct inode *inode, struct file *filp)
703{
704 struct priv_data *private_data_p;
705
706 if (quiesce_z90crypt)
707 return -EQUIESCE;
708
709 private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL);
710 if (!private_data_p) {
711 PRINTK("Memory allocate failed\n");
712 return -ENOMEM;
713 }
714
715 private_data_p->status = STAT_OPEN;
716 private_data_p->opener_pid = PID();
717 filp->private_data = private_data_p;
718 atomic_inc(&total_open);
719
720 return 0;
721}
722
723static int
724z90crypt_release(struct inode *inode, struct file *filp)
725{
726 struct priv_data *private_data_p = filp->private_data;
727
728 PDEBUG("PID %d (filp %p)\n", PID(), filp);
729
730 private_data_p->status = STAT_CLOSED;
731 memset(private_data_p, 0, sizeof(struct priv_data));
732 kfree(private_data_p);
733 atomic_dec(&total_open);
734
735 return 0;
736}
737
738/*
 739 * There are two read functions; a compile-time option selects one:
 740 * without USE_GET_RANDOM_BYTES
 741 * => read() always returns -EPERM;
 742 * otherwise
 743 * => read() uses the get_random_bytes() kernel function
744 */
745#ifndef USE_GET_RANDOM_BYTES
746/**
747 * z90crypt_read will not be supported beyond z90crypt 1.3.1
748 */
749static ssize_t
750z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
751{
752 PDEBUG("filp %p (PID %d)\n", filp, PID());
753 return -EPERM;
754}
755#else // we want to use get_random_bytes
756/**
757 * read() just returns a string of random bytes. Since we have no way
758 * to generate these cryptographically, we just execute get_random_bytes
759 * for the length specified.
760 */
761#include <linux/random.h>
762static ssize_t
763z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
764{
765 unsigned char *temp_buff;
766
767 PDEBUG("filp %p (PID %d)\n", filp, PID());
768
769 if (quiesce_z90crypt)
770 return -EQUIESCE;
771 if (count < 0) {
772 PRINTK("Requested random byte count negative: %ld\n", count);
773 return -EINVAL;
774 }
775 if (count > RESPBUFFSIZE) {
776 PDEBUG("count[%d] > RESPBUFFSIZE", count);
777 return -EINVAL;
778 }
779 if (count == 0)
780 return 0;
781 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
782 if (!temp_buff) {
783 PRINTK("Memory allocate failed\n");
784 return -ENOMEM;
785 }
786 get_random_bytes(temp_buff, count);
787
788 if (copy_to_user(buf, temp_buff, count) != 0) {
789 kfree(temp_buff);
790 return -EFAULT;
791 }
792 kfree(temp_buff);
793 return count;
794}
795#endif
796
797/**
 798 * Write is not allowed
799 */
800static ssize_t
801z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
802{
803 PDEBUG("filp %p (PID %d)\n", filp, PID());
804 return -EPERM;
805}
806
807/**
808 * New status functions
809 */
810static inline int
811get_status_totalcount(void)
812{
813 return z90crypt.hdware_info->hdware_mask.st_count;
814}
815
816static inline int
817get_status_PCICAcount(void)
818{
819 return z90crypt.hdware_info->type_mask[PCICA].st_count;
820}
821
822static inline int
823get_status_PCICCcount(void)
824{
825 return z90crypt.hdware_info->type_mask[PCICC].st_count;
826}
827
828static inline int
829get_status_PCIXCCcount(void)
830{
831 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
832 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
833}
834
835static inline int
836get_status_PCIXCCMCL2count(void)
837{
838 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
839}
840
841static inline int
842get_status_PCIXCCMCL3count(void)
843{
844 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
845}
846
847static inline int
848get_status_CEX2Ccount(void)
849{
850 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
851}
852
853static inline int
854get_status_CEX2Acount(void)
855{
856 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
857}
858
859static inline int
860get_status_requestq_count(void)
861{
862 return requestq_count;
863}
864
865static inline int
866get_status_pendingq_count(void)
867{
868 return pendingq_count;
869}
870
871static inline int
872get_status_totalopen_count(void)
873{
874 return atomic_read(&total_open);
875}
876
877static inline int
878get_status_domain_index(void)
879{
880 return z90crypt.cdx;
881}
882
883static inline unsigned char *
884get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
885{
886 int i, ix;
887
888 memcpy(status, z90crypt.hdware_info->device_type_array,
889 Z90CRYPT_NUM_APS);
890
891 for (i = 0; i < get_status_totalcount(); i++) {
892 ix = SHRT2LONG(i);
893 if (LONG2DEVPTR(ix)->user_disabled)
894 status[ix] = 0x0d;
895 }
896
897 return status;
898}
899
900static inline unsigned char *
901get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
902{
903 int i, ix;
904
905 memset(qdepth, 0, Z90CRYPT_NUM_APS);
906
907 for (i = 0; i < get_status_totalcount(); i++) {
908 ix = SHRT2LONG(i);
909 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
910 }
911
912 return qdepth;
913}
914
915static inline unsigned int *
916get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
917{
918 int i, ix;
919
920 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
921
922 for (i = 0; i < get_status_totalcount(); i++) {
923 ix = SHRT2LONG(i);
924 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
925 }
926
927 return reqcnt;
928}
929
930static inline void
931init_work_element(struct work_element *we_p,
932 struct priv_data *priv_data, pid_t pid)
933{
934 int step;
935
936 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
937 /* Come up with a unique id for this caller. */
938 step = atomic_inc_return(&z90crypt_step);
939 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
940 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
941 we_p->pid = pid;
942 we_p->priv_data = priv_data;
943 we_p->status[0] = STAT_DEFAULT;
944 we_p->audit[0] = 0x00;
945 we_p->audit[1] = 0x00;
946 we_p->audit[2] = 0x00;
947 we_p->resp_buff_size = 0;
948 we_p->retcode = 0;
949 we_p->devindex = -1;
950 we_p->devtype = -1;
951 atomic_set(&we_p->alarmrung, 0);
952 init_waitqueue_head(&we_p->waitq);
953 INIT_LIST_HEAD(&(we_p->liste));
954}
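
/*
 * Worked example (illustration only): the 8-byte caller_id assembled above
 * doubles as the PSMID that accompanies the request.  On s390, which is
 * big-endian, a pid of 0x1234 and a step value of 0x07 yield
 * 00 00 12 34 00 00 00 07; receive_from_crypto_device() later matches
 * responses back to this work element by comparing that id.
 */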
955
956static inline int
957allocate_work_element(struct work_element **we_pp,
958 struct priv_data *priv_data_p, pid_t pid)
959{
960 struct work_element *we_p;
961
962 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
963 if (!we_p)
964 return -ENOMEM;
965 init_work_element(we_p, priv_data_p, pid);
966 *we_pp = we_p;
967 return 0;
968}
969
970static inline void
971remove_device(struct device *device_p)
972{
973 if (!device_p || (device_p->disabled != 0))
974 return;
975 device_p->disabled = 1;
976 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
977 z90crypt.hdware_info->hdware_mask.disabled_count++;
978}
979
980/**
981 * Bitlength limits for each card
982 *
983 * There are new MCLs which allow more bitlengths. See the table for details.
984 * The MCL must be applied and the newer bitlengths enabled for these to work.
985 *
986 * Card Type Old limit New limit
987 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
988 * PCICC 512-1024 512-2048
989 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
990 * PCIXCC_MCL3 ----- 128-2048
991 * CEX2C 512-2048 128-2048
992 * CEX2A ??-2048 same (the lower limit is less than 128 bit...)
993 *
994 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
995 * MCL to just one card in a machine. We assume, at first, that all cards have
996 * these capabilities.
997 */
998int ext_bitlens = 1; // This is global
999#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1000#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1001#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1002#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1003#define MAX_MOD_SIZE 256 // 2048 bits
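
/*
 * Worked example (illustration only): the limits above are byte counts, so
 * the caller's inputdatalength is simply the modulus length in bits divided
 * by 8 -- a 2048-bit key gives 256 (MAX_MOD_SIZE), a 512-bit key gives 64
 * (OLD_PCIXCC_MIN_MOD_SIZE), and a 128-bit key gives 16
 * (PCIXCC_MIN_MOD_SIZE).  select_device_type() below compares the caller's
 * byte length against these values.
 */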
1004
1005static inline int
1006select_device_type(int *dev_type_p, int bytelength)
1007{
1008 static int count = 0;
1009 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1010 index_to_use;
1011 struct status *stat;
1012 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1013 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1014 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1015 (*dev_type_p != ANYDEV))
1016 return -1;
1017 if (*dev_type_p != ANYDEV) {
1018 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
1019 if (stat->st_count >
1020 (stat->disabled_count + stat->user_disabled_count))
1021 return 0;
1022 return -1;
1023 }
1024
1025	/**
1026	 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1027	 * speed.
1028	 *
1029	 * PCICA and CEX2A do NOT co-exist, so only one or the other will be
1030	 * present.
1031	 */
1032 stat = &z90crypt.hdware_info->type_mask[PCICA];
1033 PCICA_avail = stat->st_count -
1034 (stat->disabled_count + stat->user_disabled_count);
1035 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
1036 PCIXCC_MCL3_avail = stat->st_count -
1037 (stat->disabled_count + stat->user_disabled_count);
1038 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1039 CEX2C_avail = stat->st_count -
1040 (stat->disabled_count + stat->user_disabled_count);
1041 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1042 CEX2A_avail = stat->st_count -
1043 (stat->disabled_count + stat->user_disabled_count);
1044 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1045		/**
1046		 * When bitlength is a factor, PCICA and CEX2A are the most
1047		 * capable, even with the new MCL for PCIXCC.
1048		 */
1049 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1050 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1051 if (PCICA_avail) {
1052 *dev_type_p = PCICA;
1053 return 0;
1054 }
1055 if (CEX2A_avail) {
1056 *dev_type_p = CEX2A;
1057 return 0;
1058 }
1059 return -1;
1060 }
1061
1062 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1063 CEX2C_avail + CEX2A_avail);
1064 if (index_to_use < PCICA_avail)
1065 *dev_type_p = PCICA;
1066 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1067 *dev_type_p = PCIXCC_MCL3;
1068 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1069 CEX2C_avail))
1070 *dev_type_p = CEX2C;
1071 else
1072 *dev_type_p = CEX2A;
1073 count++;
1074 return 0;
1075 }
1076
1077 /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
1078 if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
1079 return -1;
1080 stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
1081 if (stat->st_count >
1082 (stat->disabled_count + stat->user_disabled_count)) {
1083 *dev_type_p = PCIXCC_MCL2;
1084 return 0;
1085 }
1086
1087 /**
1088 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
1089 * (if we don't have the MCL applied and the newer bitlengths enabled)
1090 * cannot go to a PCICC
1091 */
1092 if ((bytelength < PCICC_MIN_MOD_SIZE) ||
1093 (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
1094 return -1;
1095 }
1096 stat = &z90crypt.hdware_info->type_mask[PCICC];
1097 if (stat->st_count >
1098 (stat->disabled_count + stat->user_disabled_count)) {
1099 *dev_type_p = PCICC;
1100 return 0;
1101 }
1102
1103 return -1;
1104}
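
/*
 * Worked example of the round-robin above (illustration only): with
 * PCICA_avail = 2, PCIXCC_MCL3_avail = 0, CEX2C_avail = 1 and
 * CEX2A_avail = 0, count % 3 cycles through 0, 1, 2, so successive ANYDEV
 * requests are spread as PCICA, PCICA, CEX2C, PCICA, ...
 */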
1105
1106/**
1107 * Try the selected number, then the selected type (can be ANYDEV)
1108 */
1109static inline int
1110select_device(int *dev_type_p, int *device_nr_p, int bytelength)
1111{
1112 int i, indx, devTp, low_count, low_indx;
1113 struct device_x *index_p;
1114 struct device *dev_ptr;
1115
1116 PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
1117 if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
1118 PDEBUG("trying index = %d\n", *device_nr_p);
1119 dev_ptr = z90crypt.device_p[*device_nr_p];
1120
1121 if (dev_ptr &&
1122 (dev_ptr->dev_stat != DEV_GONE) &&
1123 (dev_ptr->disabled == 0) &&
1124 (dev_ptr->user_disabled == 0)) {
1125 PDEBUG("selected by number, index = %d\n",
1126 *device_nr_p);
1127 *dev_type_p = dev_ptr->dev_type;
1128 return *device_nr_p;
1129 }
1130 }
1131 *device_nr_p = -1;
1132 PDEBUG("trying type = %d\n", *dev_type_p);
1133 devTp = *dev_type_p;
1134 if (select_device_type(&devTp, bytelength) == -1) {
1135 PDEBUG("failed to select by type\n");
1136 return -1;
1137 }
1138 PDEBUG("selected type = %d\n", devTp);
1139 index_p = &z90crypt.hdware_info->type_x_addr[devTp];
1140 low_count = 0x0000FFFF;
1141 low_indx = -1;
1142 for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
1143 indx = index_p->device_index[i];
1144 dev_ptr = z90crypt.device_p[indx];
1145 if (dev_ptr &&
1146 (dev_ptr->dev_stat != DEV_GONE) &&
1147 (dev_ptr->disabled == 0) &&
1148 (dev_ptr->user_disabled == 0) &&
1149 (devTp == dev_ptr->dev_type) &&
1150 (low_count > dev_ptr->dev_caller_count)) {
1151 low_count = dev_ptr->dev_caller_count;
1152 low_indx = indx;
1153 }
1154 }
1155 *device_nr_p = low_indx;
1156 return low_indx;
1157}
1158
1159static inline int
1160send_to_crypto_device(struct work_element *we_p)
1161{
1162 struct caller *caller_p;
1163 struct device *device_p;
1164 int dev_nr;
1165 int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;
1166
1167 if (!we_p->requestptr)
1168 return SEN_FATAL_ERROR;
1169 caller_p = (struct caller *)we_p->requestptr;
1170 dev_nr = we_p->devindex;
1171 if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
1172 if (z90crypt.hdware_info->hdware_mask.st_count != 0)
1173 return SEN_RETRY;
1174 else
1175 return SEN_NOT_AVAIL;
1176 }
1177 we_p->devindex = dev_nr;
1178 device_p = z90crypt.device_p[dev_nr];
1179 if (!device_p)
1180 return SEN_NOT_AVAIL;
1181 if (device_p->dev_type != we_p->devtype)
1182 return SEN_RETRY;
1183 if (device_p->dev_caller_count >= device_p->dev_q_depth)
1184 return SEN_QUEUE_FULL;
1185 PDEBUG("device number prior to send: %d\n", dev_nr);
1186 switch (send_to_AP(dev_nr, z90crypt.cdx,
1187 caller_p->caller_dev_dep_req_l,
1188 caller_p->caller_dev_dep_req_p)) {
1189 case DEV_SEN_EXCEPTION:
1190 PRINTKC("Exception during send to device %d\n", dev_nr);
1191 z90crypt.terminating = 1;
1192 return SEN_FATAL_ERROR;
1193 case DEV_GONE:
1194 PRINTK("Device %d not available\n", dev_nr);
1195 remove_device(device_p);
1196 return SEN_NOT_AVAIL;
1197 case DEV_EMPTY:
1198 return SEN_NOT_AVAIL;
1199 case DEV_NO_WORK:
1200 return SEN_FATAL_ERROR;
1201 case DEV_BAD_MESSAGE:
1202 return SEN_USER_ERROR;
1203 case DEV_QUEUE_FULL:
1204 return SEN_QUEUE_FULL;
1205 default:
1206 case DEV_ONLINE:
1207 break;
1208 }
1209 list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
1210 device_p->dev_caller_count++;
1211 return 0;
1212}
1213
1214/**
1215 * Send puts the user's work on one of two queues:
1216 *   the pending queue, if the send was successful;
1217 *   the request queue, if the send failed because the device was full or busy.
1218 */
1219static inline int
1220z90crypt_send(struct work_element *we_p, const char *buf)
1221{
1222 int rv;
1223
1224 PDEBUG("PID %d\n", PID());
1225
1226 if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
1227 PDEBUG("PID %d tried to send more work but has outstanding "
1228 "work.\n", PID());
1229 return -EWORKPEND;
1230 }
1231 we_p->devindex = -1; // Reset device number
1232 spin_lock_irq(&queuespinlock);
1233 rv = send_to_crypto_device(we_p);
1234 switch (rv) {
1235 case 0:
1236 we_p->requestsent = jiffies;
1237 we_p->audit[0] |= FP_SENT;
1238 list_add_tail(&we_p->liste, &pending_list);
1239 ++pendingq_count;
1240 we_p->audit[0] |= FP_PENDING;
1241 break;
1242 case SEN_BUSY:
1243 case SEN_QUEUE_FULL:
1244 rv = 0;
1245 we_p->devindex = -1; // any device will do
1246 we_p->requestsent = jiffies;
1247 list_add_tail(&we_p->liste, &request_list);
1248 ++requestq_count;
1249 we_p->audit[0] |= FP_REQUEST;
1250 break;
1251 case SEN_RETRY:
1252 rv = -ERESTARTSYS;
1253 break;
1254 case SEN_NOT_AVAIL:
1255 PRINTK("*** No devices available.\n");
1256 rv = we_p->retcode = -ENODEV;
1257 we_p->status[0] |= STAT_FAILED;
1258 break;
1259 case REC_OPERAND_INV:
1260 case REC_OPERAND_SIZE:
1261 case REC_EVEN_MOD:
1262 case REC_INVALID_PAD:
1263 rv = we_p->retcode = -EINVAL;
1264 we_p->status[0] |= STAT_FAILED;
1265 break;
1266 default:
1267 we_p->retcode = rv;
1268 we_p->status[0] |= STAT_FAILED;
1269 break;
1270 }
1271 if (rv != -ERESTARTSYS)
1272 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1273 spin_unlock_irq(&queuespinlock);
1274 if (rv == 0)
1275 tasklet_schedule(&reader_tasklet);
1276 return rv;
1277}
1278
1279/**
1280 * process_results copies the results of the user's work back to user space.
1281 */
1282static inline int
1283z90crypt_process_results(struct work_element *we_p, char __user *buf)
1284{
1285 int rv;
1286
1287 PDEBUG("we_p %p (PID %d)\n", we_p, PID());
1288
1289 LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
1290 SET_RDWRMASK(we_p->status[0], STAT_READPEND);
1291
1292 rv = 0;
1293 if (!we_p->buffer) {
1294 PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
1295 we_p, PID());
1296 rv = -ENOBUFF;
1297 }
1298
1299 if (!rv)
1300 if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
1301 PDEBUG("copy_to_user failed: rv = %d\n", rv);
1302 rv = -EFAULT;
1303 }
1304
1305 if (!rv)
1306 rv = we_p->retcode;
1307 if (!rv)
1308 if (we_p->resp_buff_size
1309 && copy_to_user(we_p->resp_addr, we_p->resp_buff,
1310 we_p->resp_buff_size))
1311 rv = -EFAULT;
1312
1313 SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
1314 return rv;
1315}
1316
1317static unsigned char NULL_psmid[8] =
1318{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1319
1320/**
1321 * Used in device configuration functions
1322 */
1323#define MAX_RESET 90
1324
1325/**
1326 * This is used only for PCICC support
1327 */
1328static inline int
1329is_PKCS11_padded(unsigned char *buffer, int length)
1330{
1331 int i;
1332 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
1333 return 0;
1334 for (i = 2; i < length; i++)
1335 if (buffer[i] != 0xFF)
1336 break;
1337 if ((i < 10) || (i == length))
1338 return 0;
1339 if (buffer[i] != 0x00)
1340 return 0;
1341 return 1;
1342}
1343
1344/**
1345 * This is used only for PCICC support
1346 */
1347static inline int
1348is_PKCS12_padded(unsigned char *buffer, int length)
1349{
1350 int i;
1351 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
1352 return 0;
1353 for (i = 2; i < length; i++)
1354 if (buffer[i] == 0x00)
1355 break;
1356 if ((i < 10) || (i == length))
1357 return 0;
1358 if (buffer[i] != 0x00)
1359 return 0;
1360 return 1;
1361}
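
/*
 * Layout recognized by the two helpers above (a sketch of the PKCS #1 block
 * type 1 and type 2 formats they test for; the byte values come from the
 * checks themselves, not from a definition in this file):
 *
 *	is_PKCS11_padded:  00 01 FF FF ... FF 00 <data>
 *	is_PKCS12_padded:  00 02 <nonzero padding bytes> 00 <data>
 *
 * In both cases at least 8 padding bytes must precede the 00 separator,
 * which is what the (i < 10) test enforces.
 */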
1362
1363/**
1364 * builds struct caller and converts the message from generic format to
1365 * device-dependent format
1366 * we_p->funccode is ICARSAMODEXPO or ICARSACRT
1367 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1368 */
1369static inline int
1370build_caller(struct work_element *we_p, short function)
1371{
1372 int rv;
1373 struct caller *caller_p = (struct caller *)we_p->requestptr;
1374
1375 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1376 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1377 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1378 return SEN_NOT_AVAIL;
1379
1380 memcpy(caller_p->caller_id, we_p->caller_id,
1381 sizeof(caller_p->caller_id));
1382 caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
1383 caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
1384 caller_p->caller_buf_p = we_p->buffer;
1385 INIT_LIST_HEAD(&(caller_p->caller_liste));
1386
1387 rv = convert_request(we_p->buffer, we_p->funccode, function,
1388 z90crypt.cdx, we_p->devtype,
1389 &caller_p->caller_dev_dep_req_l,
1390 caller_p->caller_dev_dep_req_p);
1391 if (rv) {
1392 if (rv == SEN_NOT_AVAIL)
1393 PDEBUG("request can't be processed on hdwr avail\n");
1394 else
1395 PRINTK("Error from convert_request: %d\n", rv);
1396 }
1397 else
1398 memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
1399 return rv;
1400}
1401
1402static inline void
1403unbuild_caller(struct device *device_p, struct caller *caller_p)
1404{
1405 if (!caller_p)
1406 return;
1407 if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
1408 if (!list_empty(&caller_p->caller_liste)) {
1409 list_del_init(&caller_p->caller_liste);
1410 device_p->dev_caller_count--;
1411 }
1412 memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
1413}
1414
1415static inline int
1416get_crypto_request_buffer(struct work_element *we_p)
1417{
1418 struct ica_rsa_modexpo *mex_p;
1419 struct ica_rsa_modexpo_crt *crt_p;
1420 unsigned char *temp_buffer;
1421 short function;
1422 int rv;
1423
1424 mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
1425 crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;
1426
1427 PDEBUG("device type input = %d\n", we_p->devtype);
1428
1429 if (z90crypt.terminating)
1430 return REC_NO_RESPONSE;
1431 if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
1432 PRINTK("psmid zeroes\n");
1433 return SEN_FATAL_ERROR;
1434 }
1435 if (!we_p->buffer) {
1436 PRINTK("buffer pointer NULL\n");
1437 return SEN_USER_ERROR;
1438 }
1439 if (!we_p->requestptr) {
1440 PRINTK("caller pointer NULL\n");
1441 return SEN_USER_ERROR;
1442 }
1443
1444 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1445 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1446 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1447 (we_p->devtype != ANYDEV)) {
1448 PRINTK("invalid device type\n");
1449 return SEN_USER_ERROR;
1450 }
1451
1452 if ((mex_p->inputdatalength < 1) ||
1453 (mex_p->inputdatalength > MAX_MOD_SIZE)) {
1454 PRINTK("inputdatalength[%d] is not valid\n",
1455 mex_p->inputdatalength);
1456 return SEN_USER_ERROR;
1457 }
1458
1459 if (mex_p->outputdatalength < mex_p->inputdatalength) {
1460 PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
1461 mex_p->outputdatalength, mex_p->inputdatalength);
1462 return SEN_USER_ERROR;
1463 }
1464
1465 if (!mex_p->inputdata || !mex_p->outputdata) {
1466 PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
1467 mex_p->outputdata, mex_p->inputdata);
1468 return SEN_USER_ERROR;
1469 }
1470
1471 /**
1472 * As long as outputdatalength is big enough, we can set the
1473 * outputdatalength equal to the inputdatalength, since that is the
1474 * number of bytes we will copy in any case
1475 */
1476 mex_p->outputdatalength = mex_p->inputdatalength;
1477
1478 rv = 0;
1479 switch (we_p->funccode) {
1480 case ICARSAMODEXPO:
1481 if (!mex_p->b_key || !mex_p->n_modulus)
1482 rv = SEN_USER_ERROR;
1483 break;
1484 case ICARSACRT:
1485 if (!IS_EVEN(crt_p->inputdatalength)) {
1486 PRINTK("inputdatalength[%d] is odd, CRT form\n",
1487 crt_p->inputdatalength);
1488 rv = SEN_USER_ERROR;
1489 break;
1490 }
1491 if (!crt_p->bp_key ||
1492 !crt_p->bq_key ||
1493 !crt_p->np_prime ||
1494 !crt_p->nq_prime ||
1495 !crt_p->u_mult_inv) {
1496 PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
1497 crt_p->bp_key, crt_p->bq_key,
1498 crt_p->np_prime, crt_p->nq_prime,
1499 crt_p->u_mult_inv);
1500 rv = SEN_USER_ERROR;
1501 }
1502 break;
1503 default:
1504 PRINTK("bad func = %d\n", we_p->funccode);
1505 rv = SEN_USER_ERROR;
1506 break;
1507 }
1508 if (rv != 0)
1509 return rv;
1510
1511 if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
1512 return SEN_NOT_AVAIL;
1513
1514 temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
1515 sizeof(struct caller);
1516 if (copy_from_user(temp_buffer, mex_p->inputdata,
1517 mex_p->inputdatalength) != 0)
1518 return SEN_RELEASED;
1519
1520 function = PCI_FUNC_KEY_ENCRYPT;
1521 switch (we_p->devtype) {
1522 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1523 case PCICA:
1524 case CEX2A:
1525 function = PCI_FUNC_KEY_ENCRYPT;
1526 break;
1527	/**
1528	 * PCIXCC_MCL2 does all Mod-Expo forms with a simple RSA mod-expo
1529	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
1530	 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple
1531	 * RSA mod-expo operation.
1532	 */
1533 case PCIXCC_MCL2:
1534 if (we_p->funccode == ICARSAMODEXPO)
1535 function = PCI_FUNC_KEY_ENCRYPT;
1536 else
1537 function = PCI_FUNC_KEY_DECRYPT;
1538 break;
1539 case PCIXCC_MCL3:
1540 case CEX2C:
1541 if (we_p->funccode == ICARSAMODEXPO)
1542 function = PCI_FUNC_KEY_ENCRYPT;
1543 else
1544 function = PCI_FUNC_KEY_DECRYPT;
1545 break;
1546 /**
1547 * PCICC does everything as a PKCS-1.2 format request
1548 */
1549 case PCICC:
1550		/* PCICC cannot handle input that is PKCS#1.1 padded */
1551 if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
1552 return SEN_NOT_AVAIL;
1553 }
1554 if (we_p->funccode == ICARSAMODEXPO) {
1555 if (is_PKCS12_padded(temp_buffer,
1556 mex_p->inputdatalength))
1557 function = PCI_FUNC_KEY_ENCRYPT;
1558 else
1559 function = PCI_FUNC_KEY_DECRYPT;
1560 } else
1561 /* all CRT forms are decrypts */
1562 function = PCI_FUNC_KEY_DECRYPT;
1563 break;
1564 }
1565 PDEBUG("function: %04x\n", function);
1566 rv = build_caller(we_p, function);
1567 PDEBUG("rv from build_caller = %d\n", rv);
1568 return rv;
1569}
1570
1571static inline int
1572z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
1573 const char __user *buffer)
1574{
1575 int rv;
1576
1577 we_p->devindex = -1;
1578 if (funccode == ICARSAMODEXPO)
1579 we_p->buff_size = sizeof(struct ica_rsa_modexpo);
1580 else
1581 we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);
1582
1583 if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
1584 return -EFAULT;
1585
1586 we_p->audit[0] |= FP_COPYFROM;
1587 SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
1588 we_p->funccode = funccode;
1589 we_p->devtype = -1;
1590 we_p->audit[0] |= FP_BUFFREQ;
1591 rv = get_crypto_request_buffer(we_p);
1592 switch (rv) {
1593 case 0:
1594 we_p->audit[0] |= FP_BUFFGOT;
1595 break;
1596 case SEN_USER_ERROR:
1597 rv = -EINVAL;
1598 break;
1599 case SEN_QUEUE_FULL:
1600 rv = 0;
1601 break;
1602 case SEN_RELEASED:
1603 rv = -EFAULT;
1604 break;
1605 case REC_NO_RESPONSE:
1606 rv = -ENODEV;
1607 break;
1608 case SEN_NOT_AVAIL:
1609 case EGETBUFF:
1610 rv = -EGETBUFF;
1611 break;
1612 default:
1613 PRINTK("rv = %d\n", rv);
1614 rv = -EGETBUFF;
1615 break;
1616 }
1617 if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
1618 SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
1619 return rv;
1620}
1621
1622static inline void
1623purge_work_element(struct work_element *we_p)
1624{
1625 struct list_head *lptr;
1626
1627 spin_lock_irq(&queuespinlock);
1628 list_for_each(lptr, &request_list) {
1629 if (lptr == &we_p->liste) {
1630 list_del_init(lptr);
1631 requestq_count--;
1632 break;
1633 }
1634 }
1635 list_for_each(lptr, &pending_list) {
1636 if (lptr == &we_p->liste) {
1637 list_del_init(lptr);
1638 pendingq_count--;
1639 break;
1640 }
1641 }
1642 spin_unlock_irq(&queuespinlock);
1643}
1644
1645/**
1646 * Build the request and send it.
1647 */
1648static inline int
1649z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1650 unsigned int cmd, unsigned long arg)
1651{
1652 struct work_element *we_p;
1653 int rv;
1654
1655 if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
1656 PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
1657 return rv;
1658 }
1659 if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
1660 PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
1661 if (!rv)
1662 if ((rv = z90crypt_send(we_p, (const char *)arg)))
1663 PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
1664 if (!rv) {
1665 we_p->audit[0] |= FP_ASLEEP;
1666 wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
1667 we_p->audit[0] |= FP_AWAKE;
1668 rv = we_p->retcode;
1669 }
1670 if (!rv)
1671 rv = z90crypt_process_results(we_p, (char __user *)arg);
1672
1673 if ((we_p->status[0] & STAT_FAILED)) {
1674 switch (rv) {
1675 /**
1676 * EINVAL *after* receive is almost always a padding error or
1677 * length error issued by a coprocessor (not an accelerator).
1678 * We convert this return value to -EGETBUFF which should
1679 * trigger a fallback to software.
1680 */
1681 case -EINVAL:
1682 if ((we_p->devtype != PCICA) &&
1683 (we_p->devtype != CEX2A))
1684 rv = -EGETBUFF;
1685 break;
1686 case -ETIMEOUT:
1687 if (z90crypt.mask.st_count > 0)
1688 rv = -ERESTARTSYS; // retry with another
1689 else
1690 rv = -ENODEV; // no cards left
1691 /* fall through to clean up request queue */
1692 case -ERESTARTSYS:
1693 case -ERELEASED:
1694 switch (CHK_RDWRMASK(we_p->status[0])) {
1695 case STAT_WRITTEN:
1696 purge_work_element(we_p);
1697 break;
1698 case STAT_READPEND:
1699 case STAT_NOWORK:
1700 default:
1701 break;
1702 }
1703 break;
1704 default:
1705 we_p->status[0] ^= STAT_FAILED;
1706 break;
1707 }
1708 }
1709 free_page((long)we_p);
1710 return rv;
1711}
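
/*
 * Illustration only (not part of the driver): a rough user-space sketch of
 * the request path that z90crypt_rsa() serves.  The device node path and the
 * header declaring struct ica_rsa_modexpo / ICARSAMODEXPO are assumptions;
 * key and data buffers are left to the caller as placeholders.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	// #include <asm/z90crypt.h>	-- assumed home of the structs/ioctls
 *
 *	int modexpo_example(unsigned char *msg, unsigned char *exp,
 *			    unsigned char *mod, unsigned char *out, int len)
 *	{
 *		struct ica_rsa_modexpo mex;
 *		int rc, fd = open("/dev/z90crypt", O_RDWR);	// assumed node
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&mex, 0, sizeof(mex));
 *		mex.inputdata = msg;		// len bytes, e.g. 256 for 2048-bit
 *		mex.inputdatalength = len;
 *		mex.outputdata = out;
 *		mex.outputdatalength = len;
 *		mex.b_key = exp;		// public exponent buffer (assumed len bytes)
 *		mex.n_modulus = mod;		// modulus buffer (assumed len bytes)
 *		rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *		close(fd);
 *		return rc;
 *	}
 */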
1712
1713/**
1714 * This function is a little long, but it's really just one large switch
1715 * statement.
1716 */
1717static long
1718z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1719{
1720 struct priv_data *private_data_p = filp->private_data;
1721 unsigned char *status;
1722 unsigned char *qdepth;
1723 unsigned int *reqcnt;
1724 struct ica_z90_status *pstat;
1725 int ret, i, loopLim, tempstat;
1726 static int deprecated_msg_count1 = 0;
1727 static int deprecated_msg_count2 = 0;
1728
1729 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1730 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1731 cmd,
1732 !_IOC_DIR(cmd) ? "NO"
1733 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1734 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1735 : "WR")),
1736 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1737
1738 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1739 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1740 return -ENOTTY;
1741 }
1742
1743 ret = 0;
1744 switch (cmd) {
1745 case ICARSAMODEXPO:
1746 case ICARSACRT:
1747 if (quiesce_z90crypt) {
1748 ret = -EQUIESCE;
1749 break;
1750 }
1751 ret = -ENODEV; // Default if no devices
1752 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1753 (z90crypt.hdware_info->hdware_mask.disabled_count +
1754 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1755 for (i = 0; i < loopLim; i++) {
1756 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1757 if (ret != -ERESTARTSYS)
1758 break;
1759 }
1760 if (ret == -ERESTARTSYS)
1761 ret = -ENODEV;
1762 break;
1763
1764 case Z90STAT_TOTALCOUNT:
1765 tempstat = get_status_totalcount();
1766 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1767 ret = -EFAULT;
1768 break;
1769
1770 case Z90STAT_PCICACOUNT:
1771 tempstat = get_status_PCICAcount();
1772 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1773 ret = -EFAULT;
1774 break;
1775
1776 case Z90STAT_PCICCCOUNT:
1777 tempstat = get_status_PCICCcount();
1778 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1779 ret = -EFAULT;
1780 break;
1781
1782 case Z90STAT_PCIXCCMCL2COUNT:
1783 tempstat = get_status_PCIXCCMCL2count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1785 ret = -EFAULT;
1786 break;
1787
1788 case Z90STAT_PCIXCCMCL3COUNT:
1789 tempstat = get_status_PCIXCCMCL3count();
1790 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1791 ret = -EFAULT;
1792 break;
1793
1794 case Z90STAT_CEX2CCOUNT:
1795 tempstat = get_status_CEX2Ccount();
1796 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1797 ret = -EFAULT;
1798 break;
1799
1800 case Z90STAT_CEX2ACOUNT:
1801 tempstat = get_status_CEX2Acount();
1802 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1803 ret = -EFAULT;
1804 break;
1805
1806 case Z90STAT_REQUESTQ_COUNT:
1807 tempstat = get_status_requestq_count();
1808 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1809 ret = -EFAULT;
1810 break;
1811
1812 case Z90STAT_PENDINGQ_COUNT:
1813 tempstat = get_status_pendingq_count();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1818 case Z90STAT_TOTALOPEN_COUNT:
1819 tempstat = get_status_totalopen_count();
1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1821 ret = -EFAULT;
1822 break;
1823
1824 case Z90STAT_DOMAIN_INDEX:
1825 tempstat = get_status_domain_index();
1826 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1827 ret = -EFAULT;
1828 break;
1829
1830 case Z90STAT_STATUS_MASK:
1831 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1832 if (!status) {
1833 PRINTK("kmalloc for status failed!\n");
1834 ret = -ENOMEM;
1835 break;
1836 }
1837 get_status_status_mask(status);
1838 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1839 != 0)
1840 ret = -EFAULT;
1841 kfree(status);
1842 break;
1843
1844 case Z90STAT_QDEPTH_MASK:
1845 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1846 if (!qdepth) {
1847 PRINTK("kmalloc for qdepth failed!\n");
1848 ret = -ENOMEM;
1849 break;
1850 }
1851 get_status_qdepth_mask(qdepth);
1852 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1853 ret = -EFAULT;
1854 kfree(qdepth);
1855 break;
1856
1857 case Z90STAT_PERDEV_REQCNT:
1858 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1859 if (!reqcnt) {
1860 PRINTK("kmalloc for reqcnt failed!\n");
1861 ret = -ENOMEM;
1862 break;
1863 }
1864 get_status_perdevice_reqcnt(reqcnt);
1865 if (copy_to_user((char __user *) arg, reqcnt,
1866 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1867 ret = -EFAULT;
1868 kfree(reqcnt);
1869 break;
1870
1871 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1872 case ICAZ90STATUS:
1873 if (deprecated_msg_count1 < 20) {
1874 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1875 deprecated_msg_count1++;
1876 if (deprecated_msg_count1 == 20)
1877 PRINTK("No longer issuing messages related to "
1878 "deprecated call to ICAZ90STATUS.\n");
1879 }
1880
1881 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1882 if (!pstat) {
1883 PRINTK("kmalloc for pstat failed!\n");
1884 ret = -ENOMEM;
1885 break;
1886 }
1887
1888 pstat->totalcount = get_status_totalcount();
1889 pstat->leedslitecount = get_status_PCICAcount();
1890 pstat->leeds2count = get_status_PCICCcount();
1891 pstat->requestqWaitCount = get_status_requestq_count();
1892 pstat->pendingqWaitCount = get_status_pendingq_count();
1893 pstat->totalOpenCount = get_status_totalopen_count();
1894 pstat->cryptoDomain = get_status_domain_index();
1895 get_status_status_mask(pstat->status);
1896 get_status_qdepth_mask(pstat->qdepth);
1897
1898 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1899 sizeof(struct ica_z90_status)) != 0)
1900 ret = -EFAULT;
1901 kfree(pstat);
1902 break;
1903
1904 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1905 case Z90STAT_PCIXCCCOUNT:
1906 if (deprecated_msg_count2 < 20) {
1907 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1908 deprecated_msg_count2++;
1909 if (deprecated_msg_count2 == 20)
1910 PRINTK("No longer issuing messages about depre"
1911 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1912 }
1913
1914 tempstat = get_status_PCIXCCcount();
1915		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1916 ret = -EFAULT;
1917 break;
1918
1919 case Z90QUIESCE:
1920 if (current->euid != 0) {
1921 PRINTK("QUIESCE fails: euid %d\n",
1922 current->euid);
1923 ret = -EACCES;
1924 } else {
1925 PRINTK("QUIESCE device from PID %d\n", PID());
1926 quiesce_z90crypt = 1;
1927 }
1928 break;
1929
1930 default:
1931 /* user passed an invalid IOCTL number */
1932 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1933 ret = -ENOTTY;
1934 break;
1935 }
1936
1937 return ret;
1938}
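
/*
 * Illustration only (not part of the driver): a minimal user-space sketch of
 * one of the status ioctls handled by the switch above.  The device node
 * path and the header providing Z90STAT_TOTALCOUNT are assumptions; neither
 * is defined in this file.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	// #include <asm/z90crypt.h>	-- assumed home of the ioctl numbers
 *
 *	int main(void)
 *	{
 *		int count, fd = open("/dev/z90crypt", O_RDWR);	// assumed node
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, Z90STAT_TOTALCOUNT, &count) == 0)
 *			printf("%d crypto device(s) online\n", count);
 *		close(fd);
 *		return 0;
 *	}
 */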
1939
1940static inline int
1941sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1942{
1943 int hl, i;
1944
1945 hl = 0;
1946 for (i = 0; i < len; i++)
1947 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
1948 hl += sprintf(outaddr+hl, " ");
1949
1950 return hl;
1951}
1952
1953static inline int
1954sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
1955{
1956 int hl, inl, c, cx;
1957
1958 hl = sprintf(outaddr, " ");
1959 inl = 0;
1960 for (c = 0; c < (len / 16); c++) {
1961 hl += sprintcl(outaddr+hl, addr+inl, 16);
1962 inl += 16;
1963 }
1964
1965 cx = len%16;
1966 if (cx) {
1967 hl += sprintcl(outaddr+hl, addr+inl, cx);
1968 inl += cx;
1969 }
1970
1971 hl += sprintf(outaddr+hl, "\n");
1972
1973 return hl;
1974}
1975
1976static inline int
1977sprinthx(unsigned char *title, unsigned char *outaddr,
1978 unsigned char *addr, unsigned int len)
1979{
1980 int hl, inl, r, rx;
1981
1982 hl = sprintf(outaddr, "\n%s\n", title);
1983 inl = 0;
1984 for (r = 0; r < (len / 64); r++) {
1985 hl += sprintrw(outaddr+hl, addr+inl, 64);
1986 inl += 64;
1987 }
1988 rx = len % 64;
1989 if (rx) {
1990 hl += sprintrw(outaddr+hl, addr+inl, rx);
1991 inl += rx;
1992 }
1993
1994 hl += sprintf(outaddr+hl, "\n");
1995
1996 return hl;
1997}
1998
1999static inline int
2000sprinthx4(unsigned char *title, unsigned char *outaddr,
2001 unsigned int *array, unsigned int len)
2002{
2003 int hl, r;
2004
2005 hl = sprintf(outaddr, "\n%s\n", title);
2006
2007 for (r = 0; r < len; r++) {
2008 if ((r % 8) == 0)
2009 hl += sprintf(outaddr+hl, " ");
2010 hl += sprintf(outaddr+hl, "%08X ", array[r]);
2011 if ((r % 8) == 7)
2012 hl += sprintf(outaddr+hl, "\n");
2013 }
2014
2015 hl += sprintf(outaddr+hl, "\n");
2016
2017 return hl;
2018}
2019
2020static int
2021z90crypt_status(char *resp_buff, char **start, off_t offset,
2022 int count, int *eof, void *data)
2023{
2024 unsigned char *workarea;
2025 int len;
2026
2027 /* resp_buff is a page. Use the right half for a work area */
2028 workarea = resp_buff+2000;
2029 len = 0;
2030 len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
2031 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
2032 len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
2033 get_status_domain_index());
2034 len += sprintf(resp_buff+len, "Total device count: %d\n",
2035 get_status_totalcount());
2036 len += sprintf(resp_buff+len, "PCICA count: %d\n",
2037 get_status_PCICAcount());
2038 len += sprintf(resp_buff+len, "PCICC count: %d\n",
2039 get_status_PCICCcount());
2040 len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
2041 get_status_PCIXCCMCL2count());
2042 len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
2043 get_status_PCIXCCMCL3count());
2044 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2045 get_status_CEX2Ccount());
2046 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2047 get_status_CEX2Acount());
2048 len += sprintf(resp_buff+len, "requestq count: %d\n",
2049 get_status_requestq_count());
2050 len += sprintf(resp_buff+len, "pendingq count: %d\n",
2051 get_status_pendingq_count());
2052 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2053 get_status_totalopen_count());
2054 len += sprinthx(
2055 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2056 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2057 resp_buff+len,
2058 get_status_status_mask(workarea),
2059 Z90CRYPT_NUM_APS);
2060 len += sprinthx("Waiting work element counts",
2061 resp_buff+len,
2062 get_status_qdepth_mask(workarea),
2063 Z90CRYPT_NUM_APS);
2064 len += sprinthx4(
2065 "Per-device successfully completed request counts",
2066 resp_buff+len,
2067 get_status_perdevice_reqcnt((unsigned int *)workarea),
2068 Z90CRYPT_NUM_APS);
2069 *eof = 1;
2070 memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
2071 return len;
2072}
2073
2074static inline void
2075disable_card(int card_index)
2076{
2077 struct device *devp;
2078
2079 devp = LONG2DEVPTR(card_index);
2080 if (!devp || devp->user_disabled)
2081 return;
2082 devp->user_disabled = 1;
2083 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2084 if (devp->dev_type == -1)
2085 return;
2086 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2087}
2088
2089static inline void
2090enable_card(int card_index)
2091{
2092 struct device *devp;
2093
2094 devp = LONG2DEVPTR(card_index);
2095 if (!devp || !devp->user_disabled)
2096 return;
2097 devp->user_disabled = 0;
2098 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2099 if (devp->dev_type == -1)
2100 return;
2101 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2102}
2103
2104static int
2105z90crypt_status_write(struct file *file, const char __user *buffer,
2106 unsigned long count, void *data)
2107{
2108 int j, eol;
2109 unsigned char *lbuf, *ptr;
2110 unsigned int local_count;
2111
2112#define LBUFSIZE 1200
2113	if (count <= 0)
2114		return 0;
2115
2116	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2117	if (!lbuf) {
2118		PRINTK("kmalloc failed!\n");
2119		return 0;
2120	}
2121
2122 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2123
2124 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2125 kfree(lbuf);
2126 return -EFAULT;
2127 }
2128
2129 lbuf[local_count] = '\0';
2130
2131 ptr = strstr(lbuf, "Online devices");
2132 if (ptr == 0) {
2133 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2134 kfree(lbuf);
2135 return count;
2136 }
2137
2138 ptr = strstr(ptr, "\n");
2139 if (ptr == 0) {
2140 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2141 kfree(lbuf);
2142 return count;
2143 }
2144 ptr++;
2145
2146 if (strstr(ptr, "Waiting work element counts") == NULL) {
2147 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2148 kfree(lbuf);
2149 return count;
2150 }
2151
2152 j = 0;
2153 eol = 0;
2154 while ((j < 64) && (*ptr != '\0')) {
2155 switch (*ptr) {
2156 case '\t':
2157 case ' ':
2158 break;
2159 case '\n':
2160 default:
2161 eol = 1;
2162 break;
2163 case '0': // no device
2164 case '1': // PCICA
2165 case '2': // PCICC
2166 case '3': // PCIXCC_MCL2
2167 case '4': // PCIXCC_MCL3
2168 case '5': // CEX2C
2169 case '6': // CEX2A
2170 j++;
2171 break;
2172 case 'd':
2173 case 'D':
2174 disable_card(j);
2175 j++;
2176 break;
2177 case 'e':
2178 case 'E':
2179 enable_card(j);
2180 j++;
2181 break;
2182 }
2183 if (eol)
2184 break;
2185 ptr++;
2186 }
2187
2188 kfree(lbuf);
2189 return count;
2190}
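
/*
 * Illustration only: the parser above expects the same layout that
 * z90crypt_status() produces for the proc read, with selected device digits
 * overwritten.  For example, writing back a buffer containing
 *
 *	Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) 4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A
 *	  1d31 0000 ...
 *	Waiting work element counts
 *	  ...
 *
 * user-disables card 1 (the 'd') while leaving the other cards untouched.
 * The proc entry this handler is attached to is registered elsewhere and is
 * not shown in this file.
 */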
2191
2192/**
2193 * Functions that run under a timer, with no process id
2194 *
2195 * The task functions:
2196 * z90crypt_reader_task
2197 * helper_send_work
2198 * helper_handle_work_element
2199 * helper_receive_rc
2200 * z90crypt_config_task
2201 * z90crypt_cleanup_task
2202 *
2203 * Helper functions:
2204 * z90crypt_schedule_reader_timer
2205 * z90crypt_schedule_reader_task
2206 * z90crypt_schedule_config_task
2207 * z90crypt_schedule_cleanup_task
2208 */
2209static inline int
2210receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
2211 unsigned char *buff, unsigned char __user **dest_p_p)
2212{
2213 int dv, rv;
2214 struct device *dev_ptr;
2215 struct caller *caller_p;
2216 struct ica_rsa_modexpo *icaMsg_p;
2217 struct list_head *ptr, *tptr;
2218
2219 memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));
2220
2221 if (z90crypt.terminating)
2222 return REC_FATAL_ERROR;
2223
2224 caller_p = 0;
2225 dev_ptr = z90crypt.device_p[index];
2226 rv = 0;
2227 do {
2228 if (!dev_ptr || dev_ptr->disabled) {
2229 rv = REC_NO_WORK; // a disabled device can't return work
2230 break;
2231 }
2232 if (dev_ptr->dev_self_x != index) {
2233 PRINTKC("Corrupt dev ptr\n");
2234 z90crypt.terminating = 1;
2235 rv = REC_FATAL_ERROR;
2236 break;
2237 }
2238 if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
2239 dv = DEV_REC_EXCEPTION;
2240 PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
2241 dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
2242 } else {
2243 PDEBUG("Dequeue called for device %d\n", index);
2244 dv = receive_from_AP(index, z90crypt.cdx,
2245 dev_ptr->dev_resp_l,
2246 dev_ptr->dev_resp_p, psmid);
2247 }
2248 switch (dv) {
2249 case DEV_REC_EXCEPTION:
2250 rv = REC_FATAL_ERROR;
2251 z90crypt.terminating = 1;
2252 PRINTKC("Exception in receive from device %d\n",
2253 index);
2254 break;
2255 case DEV_ONLINE:
2256 rv = 0;
2257 break;
2258 case DEV_EMPTY:
2259 rv = REC_EMPTY;
2260 break;
2261 case DEV_NO_WORK:
2262 rv = REC_NO_WORK;
2263 break;
2264 case DEV_BAD_MESSAGE:
2265 case DEV_GONE:
2266 case REC_HARDWAR_ERR:
2267 default:
2268 rv = REC_NO_RESPONSE;
2269 break;
2270 }
2271 if (rv)
2272 break;
2273 if (dev_ptr->dev_caller_count <= 0) {
2274 rv = REC_USER_GONE;
2275 break;
2276 }
2277
2278 list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
2279 caller_p = list_entry(ptr, struct caller, caller_liste);
2280 if (!memcmp(caller_p->caller_id, psmid,
2281 sizeof(caller_p->caller_id))) {
2282 if (!list_empty(&caller_p->caller_liste)) {
2283 list_del_init(ptr);
2284 dev_ptr->dev_caller_count--;
2285 break;
2286 }
2287 }
2288 caller_p = 0;
2289 }
2290 if (!caller_p) {
2291 PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
2292 "%02X%02X%02X in device list\n",
2293 psmid[0], psmid[1], psmid[2], psmid[3],
2294 psmid[4], psmid[5], psmid[6], psmid[7]);
2295 rv = REC_USER_GONE;
2296 break;
2297 }
2298
2299 PDEBUG("caller_p after successful receive: %p\n", caller_p);
2300 rv = convert_response(dev_ptr->dev_resp_p,
2301 caller_p->caller_buf_p, buff_len_p, buff);
2302 switch (rv) {
2303 case REC_USE_PCICA:
2304 break;
2305 case REC_OPERAND_INV:
2306 case REC_OPERAND_SIZE:
2307 case REC_EVEN_MOD:
2308 case REC_INVALID_PAD:
2309 PDEBUG("device %d: 'user error' %d\n", index, rv);
2310 break;
2311 case WRONG_DEVICE_TYPE:
2312 case REC_HARDWAR_ERR:
2313 case REC_BAD_MESSAGE:
2314 PRINTKW("device %d: hardware error %d\n", index, rv);
2315 rv = REC_NO_RESPONSE;
2316 break;
2317 default:
2318 PDEBUG("device %d: rv = %d\n", index, rv);
2319 break;
2320 }
2321 } while (0);
2322
2323 switch (rv) {
2324 case 0:
2325 PDEBUG("Successful receive from device %d\n", index);
2326 icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
2327 *dest_p_p = icaMsg_p->outputdata;
2328 if (*buff_len_p == 0)
2329 PRINTK("Zero *buff_len_p\n");
2330 break;
2331 case REC_NO_RESPONSE:
2332 PRINTKW("Removing device %d from availability\n", index);
2333 remove_device(dev_ptr);
2334 break;
2335 }
2336
2337 if (caller_p)
2338 unbuild_caller(dev_ptr, caller_p);
2339
2340 return rv;
2341}
2342
2343static inline void
2344helper_send_work(int index)
2345{
2346 struct work_element *rq_p;
2347 int rv;
2348
2349 if (list_empty(&request_list))
2350 return;
2351 requestq_count--;
2352 rq_p = list_entry(request_list.next, struct work_element, liste);
2353 list_del_init(&rq_p->liste);
2354 rq_p->audit[1] |= FP_REMREQUEST;
2355 if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
2356 rq_p->devindex = SHRT2LONG(index);
2357 rv = send_to_crypto_device(rq_p);
2358 if (rv == 0) {
2359 rq_p->requestsent = jiffies;
2360 rq_p->audit[0] |= FP_SENT;
2361 list_add_tail(&rq_p->liste, &pending_list);
2362 ++pendingq_count;
2363 rq_p->audit[0] |= FP_PENDING;
2364 } else {
2365 switch (rv) {
2366 case REC_OPERAND_INV:
2367 case REC_OPERAND_SIZE:
2368 case REC_EVEN_MOD:
2369 case REC_INVALID_PAD:
2370 rq_p->retcode = -EINVAL;
2371 break;
2372 case SEN_NOT_AVAIL:
2373 case SEN_RETRY:
2374 case REC_NO_RESPONSE:
2375 default:
2376 if (z90crypt.mask.st_count > 1)
2377 rq_p->retcode =
2378 -ERESTARTSYS;
2379 else
2380 rq_p->retcode = -ENODEV;
2381 break;
2382 }
2383 rq_p->status[0] |= STAT_FAILED;
2384 rq_p->audit[1] |= FP_AWAKENING;
2385 atomic_set(&rq_p->alarmrung, 1);
2386 wake_up(&rq_p->waitq);
2387 }
2388 } else {
2389 if (z90crypt.mask.st_count > 1)
2390 rq_p->retcode = -ERESTARTSYS;
2391 else
2392 rq_p->retcode = -ENODEV;
2393 rq_p->status[0] |= STAT_FAILED;
2394 rq_p->audit[1] |= FP_AWAKENING;
2395 atomic_set(&rq_p->alarmrung, 1);
2396 wake_up(&rq_p->waitq);
2397 }
2398}
2399
2400static inline void
2401helper_handle_work_element(int index, unsigned char psmid[8], int rc,
2402 int buff_len, unsigned char *buff,
2403 unsigned char __user *resp_addr)
2404{
2405 struct work_element *pq_p;
2406 struct list_head *lptr, *tptr;
2407
2408 pq_p = 0;
2409 list_for_each_safe(lptr, tptr, &pending_list) {
2410 pq_p = list_entry(lptr, struct work_element, liste);
2411 if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
2412 list_del_init(lptr);
2413 pendingq_count--;
2414 pq_p->audit[1] |= FP_NOTPENDING;
2415 break;
2416 }
2417 pq_p = 0;
2418 }
2419
2420 if (!pq_p) {
2421 PRINTK("device %d has work but no caller exists on pending Q\n",
2422 SHRT2LONG(index));
2423 return;
2424 }
2425
2426 switch (rc) {
2427 case 0:
2428 pq_p->resp_buff_size = buff_len;
2429 pq_p->audit[1] |= FP_RESPSIZESET;
2430 if (buff_len) {
2431 pq_p->resp_addr = resp_addr;
2432 pq_p->audit[1] |= FP_RESPADDRCOPIED;
2433 memcpy(pq_p->resp_buff, buff, buff_len);
2434 pq_p->audit[1] |= FP_RESPBUFFCOPIED;
2435 }
2436 break;
2437 case REC_OPERAND_INV:
2438 case REC_OPERAND_SIZE:
2439 case REC_EVEN_MOD:
2440 case REC_INVALID_PAD:
2441 PDEBUG("-EINVAL after application error %d\n", rc);
2442 pq_p->retcode = -EINVAL;
2443 pq_p->status[0] |= STAT_FAILED;
2444 break;
2445 case REC_USE_PCICA:
2446 pq_p->retcode = -ERESTARTSYS;
2447 pq_p->status[0] |= STAT_FAILED;
2448 break;
2449 case REC_NO_RESPONSE:
2450 default:
2451 if (z90crypt.mask.st_count > 1)
2452 pq_p->retcode = -ERESTARTSYS;
2453 else
2454 pq_p->retcode = -ENODEV;
2455 pq_p->status[0] |= STAT_FAILED;
2456 break;
2457 }
2458 if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
2459 pq_p->audit[1] |= FP_AWAKENING;
2460 atomic_set(&pq_p->alarmrung, 1);
2461 wake_up(&pq_p->waitq);
2462 }
2463}
2464
2465/**
2466 * return TRUE if the work element should be removed from the queue
2467 */
2468static inline int
2469helper_receive_rc(int index, int *rc_p)
2470{
2471 switch (*rc_p) {
2472 case 0:
2473 case REC_OPERAND_INV:
2474 case REC_OPERAND_SIZE:
2475 case REC_EVEN_MOD:
2476 case REC_INVALID_PAD:
2477 case REC_USE_PCICA:
2478 break;
2479
2480 case REC_BUSY:
2481 case REC_NO_WORK:
2482 case REC_EMPTY:
2483 case REC_RETRY_DEV:
2484 case REC_FATAL_ERROR:
2485 return 0;
2486
2487 case REC_NO_RESPONSE:
2488 break;
2489
2490 default:
2491 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2492 *rc_p, SHRT2LONG(index));
2493 *rc_p = REC_NO_RESPONSE;
2494 break;
2495 }
2496 return 1;
2497}
2498
2499static inline void
2500z90crypt_schedule_reader_timer(void)
2501{
2502 if (timer_pending(&reader_timer))
2503 return;
2504 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2505 PRINTK("Timer pending while modifying reader timer\n");
2506}
2507
2508static void
2509z90crypt_reader_task(unsigned long ptr)
2510{
2511 int workavail, index, rc, buff_len;
2512 unsigned char psmid[8];
2513 unsigned char __user *resp_addr;
2514 static unsigned char buff[1024];
2515
2516 /**
2517 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
2518 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
2519 * loop, there is no work remaining on the queues.
2520 */
2521 resp_addr = 0;
2522 workavail = 2;
2523 buff_len = 0;
2524 while (workavail) {
2525 workavail--;
2526 rc = 0;
2527 spin_lock_irq(&queuespinlock);
2528 memset(buff, 0x00, sizeof(buff));
2529
2530 /* Dequeue once from each device in round robin. */
2531 for (index = 0; index < z90crypt.mask.st_count; index++) {
2532 PDEBUG("About to receive.\n");
2533 rc = receive_from_crypto_device(SHRT2LONG(index),
2534 psmid,
2535 &buff_len,
2536 buff,
2537 &resp_addr);
2538 PDEBUG("Dequeued: rc = %d.\n", rc);
2539
2540 if (helper_receive_rc(index, &rc)) {
2541 if (rc != REC_NO_RESPONSE) {
2542 helper_send_work(index);
2543 workavail = 2;
2544 }
2545
2546 helper_handle_work_element(index, psmid, rc,
2547 buff_len, buff,
2548 resp_addr);
2549 }
2550
2551 if (rc == REC_FATAL_ERROR)
2552 PRINTKW("REC_FATAL_ERROR from device %d!\n",
2553 SHRT2LONG(index));
2554 }
2555 spin_unlock_irq(&queuespinlock);
2556 }
2557
2558 if (pendingq_count + requestq_count)
2559 z90crypt_schedule_reader_timer();
2560}
2561
2562static inline void
2563z90crypt_schedule_config_task(unsigned int expiration)
2564{
2565 if (timer_pending(&config_timer))
2566 return;
2567 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2568 PRINTK("Timer pending while modifying config timer\n");
2569}
2570
2571static void
2572z90crypt_config_task(unsigned long ptr)
2573{
2574 int rc;
2575
2576 PDEBUG("jiffies %ld\n", jiffies);
2577
2578 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2579 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2580 /* If return was fatal, don't bother reconfiguring */
2581 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2582 z90crypt_schedule_config_task(CONFIGTIME);
2583}
2584
2585static inline void
2586z90crypt_schedule_cleanup_task(void)
2587{
2588 if (timer_pending(&cleanup_timer))
2589 return;
2590 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2591 PRINTK("Timer pending while modifying cleanup timer\n");
2592}
2593
2594static inline void
2595helper_drain_queues(void)
2596{
2597 struct work_element *pq_p;
2598 struct list_head *lptr, *tptr;
2599
2600 list_for_each_safe(lptr, tptr, &pending_list) {
2601 pq_p = list_entry(lptr, struct work_element, liste);
2602 pq_p->retcode = -ENODEV;
2603 pq_p->status[0] |= STAT_FAILED;
2604 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2605 (struct caller *)pq_p->requestptr);
2606 list_del_init(lptr);
2607 pendingq_count--;
2608 pq_p->audit[1] |= FP_NOTPENDING;
2609 pq_p->audit[1] |= FP_AWAKENING;
2610 atomic_set(&pq_p->alarmrung, 1);
2611 wake_up(&pq_p->waitq);
2612 }
2613
2614 list_for_each_safe(lptr, tptr, &request_list) {
2615 pq_p = list_entry(lptr, struct work_element, liste);
2616 pq_p->retcode = -ENODEV;
2617 pq_p->status[0] |= STAT_FAILED;
2618 list_del_init(lptr);
2619 requestq_count--;
2620 pq_p->audit[1] |= FP_REMREQUEST;
2621 pq_p->audit[1] |= FP_AWAKENING;
2622 atomic_set(&pq_p->alarmrung, 1);
2623 wake_up(&pq_p->waitq);
2624 }
2625}
2626
2627static inline void
2628helper_timeout_requests(void)
2629{
2630 struct work_element *pq_p;
2631 struct list_head *lptr, *tptr;
2632 long timelimit;
2633
2634 timelimit = jiffies - (CLEANUPTIME * HZ);
2635 /* The list is in strict chronological order */
2636 list_for_each_safe(lptr, tptr, &pending_list) {
2637 pq_p = list_entry(lptr, struct work_element, liste);
2638 if (pq_p->requestsent >= timelimit)
2639 break;
2640 PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2641 ((struct caller *)pq_p->requestptr)->caller_id[0],
2642 ((struct caller *)pq_p->requestptr)->caller_id[1],
2643 ((struct caller *)pq_p->requestptr)->caller_id[2],
2644 ((struct caller *)pq_p->requestptr)->caller_id[3],
2645 ((struct caller *)pq_p->requestptr)->caller_id[4],
2646 ((struct caller *)pq_p->requestptr)->caller_id[5],
2647 ((struct caller *)pq_p->requestptr)->caller_id[6],
2648 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2649 pq_p->retcode = -ETIMEOUT;
2650 pq_p->status[0] |= STAT_FAILED;
2651 /* get this off any caller queue it may be on */
2652 unbuild_caller(LONG2DEVPTR(pq_p->devindex),
2653 (struct caller *) pq_p->requestptr);
2654 list_del_init(lptr);
2655 pendingq_count--;
2656 pq_p->audit[1] |= FP_TIMEDOUT;
2657 pq_p->audit[1] |= FP_NOTPENDING;
2658 pq_p->audit[1] |= FP_AWAKENING;
2659 atomic_set(&pq_p->alarmrung, 1);
2660 wake_up(&pq_p->waitq);
2661 }
2662
2663 /**
2664 * If pending count is zero, items left on the request queue may
2665 * never be processed.
2666 */
2667 if (pendingq_count <= 0) {
2668 list_for_each_safe(lptr, tptr, &request_list) {
2669 pq_p = list_entry(lptr, struct work_element, liste);
2670 if (pq_p->requestsent >= timelimit)
2671 break;
2672 PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
2673 ((struct caller *)pq_p->requestptr)->caller_id[0],
2674 ((struct caller *)pq_p->requestptr)->caller_id[1],
2675 ((struct caller *)pq_p->requestptr)->caller_id[2],
2676 ((struct caller *)pq_p->requestptr)->caller_id[3],
2677 ((struct caller *)pq_p->requestptr)->caller_id[4],
2678 ((struct caller *)pq_p->requestptr)->caller_id[5],
2679 ((struct caller *)pq_p->requestptr)->caller_id[6],
2680 ((struct caller *)pq_p->requestptr)->caller_id[7]);
2681 pq_p->retcode = -ETIMEOUT;
2682 pq_p->status[0] |= STAT_FAILED;
2683 list_del_init(lptr);
2684 requestq_count--;
2685 pq_p->audit[1] |= FP_TIMEDOUT;
2686 pq_p->audit[1] |= FP_REMREQUEST;
2687 pq_p->audit[1] |= FP_AWAKENING;
2688 atomic_set(&pq_p->alarmrung, 1);
2689 wake_up(&pq_p->waitq);
2690 }
2691 }
2692}
2693
2694static void
2695z90crypt_cleanup_task(unsigned long ptr)
2696{
2697 PDEBUG("jiffies %ld\n", jiffies);
2698 spin_lock_irq(&queuespinlock);
2699 if (z90crypt.mask.st_count <= 0) // no devices!
2700 helper_drain_queues();
2701 else
2702 helper_timeout_requests();
2703 spin_unlock_irq(&queuespinlock);
2704 z90crypt_schedule_cleanup_task();
2705}
2706
2707static void
2708z90crypt_schedule_reader_task(unsigned long ptr)
2709{
2710 tasklet_schedule(&reader_tasklet);
2711}
2712
2713/**
2714 * Lowlevel Functions:
2715 *
2716 * create_z90crypt: creates and initializes basic data structures
2717 * refresh_z90crypt: re-initializes basic data structures
2718 * find_crypto_devices: returns a count and mask of hardware status
2719 * create_crypto_device: builds the descriptor for a device
2720 * destroy_crypto_device: unallocates the descriptor for a device
2721 * destroy_z90crypt: drains all work, unallocates structs
2722 */
2723
2724/**
2725 * build the z90crypt root structure using the given domain index
2726 */
2727static int
2728create_z90crypt(int *cdx_p)
2729{
2730 struct hdware_block *hdware_blk_p;
2731
2732 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2733 z90crypt.domain_established = 0;
2734 z90crypt.len = sizeof(struct z90crypt);
2735 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2736 z90crypt.cdx = *cdx_p;
2737
2738 hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2739 if (!hdware_blk_p) {
2740 PDEBUG("kmalloc for hardware block failed\n");
2741 return ENOMEM;
2742 }
2743 z90crypt.hdware_info = hdware_blk_p;
2744
2745 return 0;
2746}
2747
2748static inline int
2749helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2750{
2751 enum hdstat hd_stat;
2752 int q_depth, dev_type;
2753 int indx, chkdom, numdomains;
2754
2755 q_depth = dev_type = numdomains = 0;
2756 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2757 for (indx = 0; indx < z90crypt.max_count; indx++) {
2758 hd_stat = HD_NOT_THERE;
2759 numdomains = 0;
2760 for (chkdom = 0; chkdom <= 15; chkdom++) {
2761 hd_stat = query_online(indx, chkdom, MAX_RESET,
2762 &q_depth, &dev_type);
2763 if (hd_stat == HD_TSQ_EXCEPTION) {
2764 z90crypt.terminating = 1;
2765 PRINTKC("exception taken!\n");
2766 break;
2767 }
2768 if (hd_stat == HD_ONLINE) {
2769 cdx_array[numdomains++] = chkdom;
2770 if (*cdx_p == chkdom) {
2771 *correct_cdx_found = 1;
2772 break;
2773 }
2774 }
2775 }
2776 if ((*correct_cdx_found == 1) || (numdomains != 0))
2777 break;
2778 if (z90crypt.terminating)
2779 break;
2780 }
2781 return numdomains;
2782}
2783
2784static inline int
2785probe_crypto_domain(int *cdx_p)
2786{
2787 int cdx_array[16];
2788 char cdx_array_text[53], temp[5];
2789 int correct_cdx_found, numdomains;
2790
2791 correct_cdx_found = 0;
2792 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2793
2794 if (z90crypt.terminating)
2795 return TSQ_FATAL_ERROR;
2796
2797 if (correct_cdx_found)
2798 return 0;
2799
2800 if (numdomains == 0) {
2801 PRINTKW("Unable to find crypto domain: No devices found\n");
2802 return Z90C_NO_DEVICES;
2803 }
2804
2805 if (numdomains == 1) {
2806 if (*cdx_p == -1) {
2807 *cdx_p = cdx_array[0];
2808 return 0;
2809 }
2810 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2811 *cdx_p, cdx_array[0]);
2812 return Z90C_INCORRECT_DOMAIN;
2813 }
2814
2815 numdomains--;
2816 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2817 while (numdomains) {
2818 numdomains--;
2819 sprintf(temp, ", %d", cdx_array[numdomains]);
2820 strcat(cdx_array_text, temp);
2821 }
2822
2823 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2824 *cdx_p, cdx_array_text);
2825 return Z90C_AMBIGUOUS_DOMAIN;
2826}
2827
2828static int
2829refresh_z90crypt(int *cdx_p)
2830{
2831 int i, j, indx, rv;
2832 static struct status local_mask;
2833 struct device *devPtr;
2834 unsigned char oldStat, newStat;
2835 int return_unchanged;
2836
2837 if (z90crypt.len != sizeof(z90crypt))
2838 return ENOTINIT;
2839 if (z90crypt.terminating)
2840 return TSQ_FATAL_ERROR;
2841 rv = 0;
2842 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2843 !z90crypt.domain_established) {
2844 rv = probe_crypto_domain(cdx_p);
2845 if (z90crypt.terminating)
2846 return TSQ_FATAL_ERROR;
2847 if (rv == Z90C_NO_DEVICES)
2848 return 0; // try later
2849 if (rv)
2850 return rv;
2851 z90crypt.cdx = *cdx_p;
2852 z90crypt.domain_established = 1;
2853 }
2854 rv = find_crypto_devices(&local_mask);
2855 if (rv) {
2856 PRINTK("find crypto devices returned %d\n", rv);
2857 return rv;
2858 }
2859 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2860 sizeof(struct status))) {
2861 return_unchanged = 1;
2862 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2863 /**
2864 * Check for disabled cards. If any device is marked
2865 * disabled, destroy it.
2866 */
2867 for (j = 0;
2868 j < z90crypt.hdware_info->type_mask[i].st_count;
2869 j++) {
2870 indx = z90crypt.hdware_info->type_x_addr[i].
2871 device_index[j];
2872 devPtr = z90crypt.device_p[indx];
2873 if (devPtr && devPtr->disabled) {
2874 local_mask.st_mask[indx] = HD_NOT_THERE;
2875 return_unchanged = 0;
2876 }
2877 }
2878 }
2879 if (return_unchanged == 1)
2880 return 0;
2881 }
2882
2883 spin_lock_irq(&queuespinlock);
2884 for (i = 0; i < z90crypt.max_count; i++) {
2885 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2886 newStat = local_mask.st_mask[i];
2887 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2888 destroy_crypto_device(i);
2889 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2890 rv = create_crypto_device(i);
2891 if (rv >= REC_FATAL_ERROR)
2892 return rv;
2893 if (rv != 0) {
2894 local_mask.st_mask[i] = HD_NOT_THERE;
2895 local_mask.st_count--;
2896 }
2897 }
2898 }
2899 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2900 sizeof(local_mask.st_mask));
2901 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2902 z90crypt.hdware_info->hdware_mask.disabled_count =
2903 local_mask.disabled_count;
2904 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2905 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2906 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2907 &(z90crypt.hdware_info->type_x_addr[i]));
2908 spin_unlock_irq(&queuespinlock);
2909
2910 return rv;
2911}
2912
2913static int
2914find_crypto_devices(struct status *deviceMask)
2915{
2916 int i, q_depth, dev_type;
2917 enum hdstat hd_stat;
2918
2919 deviceMask->st_count = 0;
2920 deviceMask->disabled_count = 0;
2921 deviceMask->user_disabled_count = 0;
2922
2923 for (i = 0; i < z90crypt.max_count; i++) {
2924 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2925 &dev_type);
2926 if (hd_stat == HD_TSQ_EXCEPTION) {
2927 z90crypt.terminating = 1;
2928 PRINTKC("Exception during probe for crypto devices\n");
2929 return TSQ_FATAL_ERROR;
2930 }
2931 deviceMask->st_mask[i] = hd_stat;
2932 if (hd_stat == HD_ONLINE) {
2933 PDEBUG("Got an online crypto!: %d\n", i);
2934 PDEBUG("Got a queue depth of %d\n", q_depth);
2935 PDEBUG("Got a device type of %d\n", dev_type);
2936 if (q_depth <= 0)
2937 return TSQ_FATAL_ERROR;
2938 deviceMask->st_count++;
2939 z90crypt.q_depth_array[i] = q_depth;
2940 z90crypt.dev_type_array[i] = dev_type;
2941 }
2942 }
2943
2944 return 0;
2945}
2946
2947static int
2948refresh_index_array(struct status *status_str, struct device_x *index_array)
2949{
2950 int i, count;
2951 enum devstat stat;
2952
2953 i = -1;
2954 count = 0;
2955 do {
2956 stat = status_str->st_mask[++i];
2957 if (stat == DEV_ONLINE)
2958 index_array->device_index[count++] = i;
2959 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2960
2961 return count;
2962}
2963
2964static int
2965create_crypto_device(int index)
2966{
2967 int rv, devstat, total_size;
2968 struct device *dev_ptr;
2969 struct status *type_str_p;
2970 int deviceType;
2971
2972 dev_ptr = z90crypt.device_p[index];
2973 if (!dev_ptr) {
2974 total_size = sizeof(struct device) +
2975 z90crypt.q_depth_array[index] * sizeof(int);
2976
2977 dev_ptr = kzalloc(total_size, GFP_ATOMIC);
2978 if (!dev_ptr) {
2979 PRINTK("kmalloc device %d failed\n", index);
2980 return ENOMEM;
2981 }
2982 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2983 if (!dev_ptr->dev_resp_p) {
2984 kfree(dev_ptr);
2985 PRINTK("kmalloc device %d rec buffer failed\n", index);
2986 return ENOMEM;
2987 }
2988 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2989 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2990 }
2991
2992 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2993 if (devstat == DEV_RSQ_EXCEPTION) {
2994 PRINTK("exception during reset device %d\n", index);
2995 kfree(dev_ptr->dev_resp_p);
2996 kfree(dev_ptr);
2997 return RSQ_FATAL_ERROR;
2998 }
2999 if (devstat == DEV_ONLINE) {
3000 dev_ptr->dev_self_x = index;
3001 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3002 if (dev_ptr->dev_type == NILDEV) {
3003 rv = probe_device_type(dev_ptr);
3004 if (rv) {
3005 PRINTK("rv = %d from probe_device_type %d\n",
3006 rv, index);
3007 kfree(dev_ptr->dev_resp_p);
3008 kfree(dev_ptr);
3009 return rv;
3010 }
3011 }
3012 if (dev_ptr->dev_type == PCIXCC_UNK) {
3013 rv = probe_PCIXCC_type(dev_ptr);
3014 if (rv) {
3015 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3016 rv, index);
3017 kfree(dev_ptr->dev_resp_p);
3018 kfree(dev_ptr);
3019 return rv;
3020 }
3021 }
3022 deviceType = dev_ptr->dev_type;
3023 z90crypt.dev_type_array[index] = deviceType;
3024 if (deviceType == PCICA)
3025 z90crypt.hdware_info->device_type_array[index] = 1;
3026 else if (deviceType == PCICC)
3027 z90crypt.hdware_info->device_type_array[index] = 2;
3028 else if (deviceType == PCIXCC_MCL2)
3029 z90crypt.hdware_info->device_type_array[index] = 3;
3030 else if (deviceType == PCIXCC_MCL3)
3031 z90crypt.hdware_info->device_type_array[index] = 4;
3032 else if (deviceType == CEX2C)
3033 z90crypt.hdware_info->device_type_array[index] = 5;
3034 else if (deviceType == CEX2A)
3035 z90crypt.hdware_info->device_type_array[index] = 6;
3036 else // No idea how this would happen.
3037 z90crypt.hdware_info->device_type_array[index] = -1;
3038 }
3039
3040 /**
3041 * 'q_depth' returned by the hardware is one less than
3042 * the actual depth
3043 */
3044 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3045 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3046 dev_ptr->dev_stat = devstat;
3047 dev_ptr->disabled = 0;
3048 z90crypt.device_p[index] = dev_ptr;
3049
3050 if (devstat == DEV_ONLINE) {
3051 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3052 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3053 z90crypt.mask.st_count++;
3054 }
3055 deviceType = dev_ptr->dev_type;
3056 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3057 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3058 type_str_p->st_mask[index] = DEV_ONLINE;
3059 type_str_p->st_count++;
3060 }
3061 }
3062
3063 return 0;
3064}
3065
3066static int
3067destroy_crypto_device(int index)
3068{
3069 struct device *dev_ptr;
3070 int t, disabledFlag;
3071
3072 dev_ptr = z90crypt.device_p[index];
3073
3074 /* remember device type; get rid of device struct */
3075 if (dev_ptr) {
3076 disabledFlag = dev_ptr->disabled;
3077 t = dev_ptr->dev_type;
3078 kfree(dev_ptr->dev_resp_p);
3079 kfree(dev_ptr);
3080 } else {
3081 disabledFlag = 0;
3082 t = -1;
3083 }
3084 z90crypt.device_p[index] = 0;
3085
3086 /* if the type is valid, remove the device from the type_mask */
3087 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3088 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3089 z90crypt.hdware_info->type_mask[t].st_count--;
3090 if (disabledFlag == 1)
3091 z90crypt.hdware_info->type_mask[t].disabled_count--;
3092 }
3093 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3094 z90crypt.mask.st_mask[index] = DEV_GONE;
3095 z90crypt.mask.st_count--;
3096 }
3097 z90crypt.hdware_info->device_type_array[index] = 0;
3098
3099 return 0;
3100}
3101
3102static void
3103destroy_z90crypt(void)
3104{
3105 int i;
3106
3107 for (i = 0; i < z90crypt.max_count; i++)
3108 if (z90crypt.device_p[i])
3109 destroy_crypto_device(i);
3110 kfree(z90crypt.hdware_info);
3111 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3112}
3113
3114static unsigned char static_testmsg[384] = {
31150x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
31160x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
31170x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
31180x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
31190x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31200x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31210x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
31220x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31230xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31240x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31250x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31260x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
31270x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
31280x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
31290x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
31300x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
31310x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
31320x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
31330x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
31340x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
31350x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
31360xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
31370x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
31380x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
3139};
3140
3141static int
3142probe_device_type(struct device *devPtr)
3143{
3144 int rv, dv, i, index, length;
3145 unsigned char psmid[8];
3146 static unsigned char loc_testmsg[sizeof(static_testmsg)];
3147
3148 index = devPtr->dev_self_x;
3149 rv = 0;
3150 do {
3151 memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
3152 length = sizeof(static_testmsg) - 24;
3153 /* the -24 allows for the header */
3154 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3155 if (dv) {
3156 PDEBUG("dv returned by send during probe: %d\n", dv);
3157 if (dv == DEV_SEN_EXCEPTION) {
3158 rv = SEN_FATAL_ERROR;
3159 PRINTKC("exception in send to AP %d\n", index);
3160 break;
3161 }
3162 PDEBUG("return value from send_to_AP: %d\n", rv);
3163 switch (dv) {
3164 case DEV_GONE:
3165 PDEBUG("dev %d not available\n", index);
3166 rv = SEN_NOT_AVAIL;
3167 break;
3168 case DEV_ONLINE:
3169 rv = 0;
3170 break;
3171 case DEV_EMPTY:
3172 rv = SEN_NOT_AVAIL;
3173 break;
3174 case DEV_NO_WORK:
3175 rv = SEN_FATAL_ERROR;
3176 break;
3177 case DEV_BAD_MESSAGE:
3178 rv = SEN_USER_ERROR;
3179 break;
3180 case DEV_QUEUE_FULL:
3181 rv = SEN_QUEUE_FULL;
3182 break;
3183 default:
3184 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3185 rv = SEN_NOT_AVAIL;
3186 break;
3187 }
3188 }
3189
3190 if (rv)
3191 break;
3192
3193 for (i = 0; i < 6; i++) {
3194 mdelay(300);
3195 dv = receive_from_AP(index, z90crypt.cdx,
3196 devPtr->dev_resp_l,
3197 devPtr->dev_resp_p, psmid);
3198 PDEBUG("dv returned by DQ = %d\n", dv);
3199 if (dv == DEV_REC_EXCEPTION) {
3200 rv = REC_FATAL_ERROR;
3201 PRINTKC("exception in dequeue %d\n",
3202 index);
3203 break;
3204 }
3205 switch (dv) {
3206 case DEV_ONLINE:
3207 rv = 0;
3208 break;
3209 case DEV_EMPTY:
3210 rv = REC_EMPTY;
3211 break;
3212 case DEV_NO_WORK:
3213 rv = REC_NO_WORK;
3214 break;
3215 case DEV_BAD_MESSAGE:
3216 case DEV_GONE:
3217 default:
3218 rv = REC_NO_RESPONSE;
3219 break;
3220 }
3221 if ((rv != 0) && (rv != REC_NO_WORK))
3222 break;
3223 if (rv == 0)
3224 break;
3225 }
3226 if (rv)
3227 break;
3228 rv = (devPtr->dev_resp_p[0] == 0x00) &&
3229 (devPtr->dev_resp_p[1] == 0x86);
3230 if (rv)
3231 devPtr->dev_type = PCICC;
3232 else
3233 devPtr->dev_type = PCICA;
3234 rv = 0;
3235 } while (0);
3236 /* In a general error case, the card is not marked online */
3237 return rv;
3238}
3239
3240static unsigned char MCL3_testmsg[] = {
32410x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
32420x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32430x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32440x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32450x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
32460x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
32470x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
32480x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
32490x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32500x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32510x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32520x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32530x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32540x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32550x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32560x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32570x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32580x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32590x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32600x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
32610x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
32620x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
32630x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
32640xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
32650x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
32660x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
32670x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
32680x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
32690x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
32700xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
32710xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
32720x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
32730x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
32740xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
32750x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
3276};
3277
3278static int
3279probe_PCIXCC_type(struct device *devPtr)
3280{
3281 int rv, dv, i, index, length;
3282 unsigned char psmid[8];
3283 static unsigned char loc_testmsg[548];
3284 struct CPRBX *cprbx_p;
3285
3286 index = devPtr->dev_self_x;
3287 rv = 0;
3288 do {
3289 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3290 length = sizeof(MCL3_testmsg) - 0x0C;
3291 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3292 if (dv) {
3293 PDEBUG("dv returned = %d\n", dv);
3294 if (dv == DEV_SEN_EXCEPTION) {
3295 rv = SEN_FATAL_ERROR;
3296 PRINTKC("exception in send to AP %d\n", index);
3297 break;
3298 }
3299 PDEBUG("return value from send_to_AP: %d\n", rv);
3300 switch (dv) {
3301 case DEV_GONE:
3302 PDEBUG("dev %d not available\n", index);
3303 rv = SEN_NOT_AVAIL;
3304 break;
3305 case DEV_ONLINE:
3306 rv = 0;
3307 break;
3308 case DEV_EMPTY:
3309 rv = SEN_NOT_AVAIL;
3310 break;
3311 case DEV_NO_WORK:
3312 rv = SEN_FATAL_ERROR;
3313 break;
3314 case DEV_BAD_MESSAGE:
3315 rv = SEN_USER_ERROR;
3316 break;
3317 case DEV_QUEUE_FULL:
3318 rv = SEN_QUEUE_FULL;
3319 break;
3320 default:
3321 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3322 rv = SEN_NOT_AVAIL;
3323 break;
3324 }
3325 }
3326
3327 if (rv)
3328 break;
3329
3330 for (i = 0; i < 6; i++) {
3331 mdelay(300);
3332 dv = receive_from_AP(index, z90crypt.cdx,
3333 devPtr->dev_resp_l,
3334 devPtr->dev_resp_p, psmid);
3335 PDEBUG("dv returned by DQ = %d\n", dv);
3336 if (dv == DEV_REC_EXCEPTION) {
3337 rv = REC_FATAL_ERROR;
3338 PRINTKC("exception in dequeue %d\n",
3339 index);
3340 break;
3341 }
3342 switch (dv) {
3343 case DEV_ONLINE:
3344 rv = 0;
3345 break;
3346 case DEV_EMPTY:
3347 rv = REC_EMPTY;
3348 break;
3349 case DEV_NO_WORK:
3350 rv = REC_NO_WORK;
3351 break;
3352 case DEV_BAD_MESSAGE:
3353 case DEV_GONE:
3354 default:
3355 rv = REC_NO_RESPONSE;
3356 break;
3357 }
3358 if ((rv != 0) && (rv != REC_NO_WORK))
3359 break;
3360 if (rv == 0)
3361 break;
3362 }
3363 if (rv)
3364 break;
3365 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3366 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3367 devPtr->dev_type = PCIXCC_MCL2;
3368 PDEBUG("device %d is MCL2\n", index);
3369 } else {
3370 devPtr->dev_type = PCIXCC_MCL3;
3371 PDEBUG("device %d is MCL3\n", index);
3372 }
3373 } while (0);
3374 /* In a general error case, the card is not marked online */
3375 return rv;
3376}
3377
3378module_init(z90crypt_init_module);
3379module_exit(z90crypt_cleanup_module);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
new file mode 100644
index 000000000000..1edc10a7a6f2
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -0,0 +1,1091 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_api.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/miscdevice.h>
34#include <linux/fs.h>
35#include <linux/proc_fs.h>
36#include <linux/compat.h>
37#include <asm/atomic.h>
38#include <asm/uaccess.h>
39
40#include "zcrypt_api.h"
41
42/**
43 * Module description.
44 */
45MODULE_AUTHOR("IBM Corporation");
46MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
47 "Copyright 2001, 2006 IBM Corporation");
48MODULE_LICENSE("GPL");
49
50static DEFINE_SPINLOCK(zcrypt_device_lock);
51static LIST_HEAD(zcrypt_device_list);
52static int zcrypt_device_count = 0;
53static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
54
55/**
56 * Device attributes common for all crypto devices.
57 */
58static ssize_t zcrypt_type_show(struct device *dev,
59 struct device_attribute *attr, char *buf)
60{
61 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
62 return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
63}
64
65static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
66
67static ssize_t zcrypt_online_show(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
71 return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
72}
73
74static ssize_t zcrypt_online_store(struct device *dev,
75 struct device_attribute *attr,
76 const char *buf, size_t count)
77{
78 struct zcrypt_device *zdev = to_ap_dev(dev)->private;
79 int online;
80
81 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
82 return -EINVAL;
83 zdev->online = online;
84 if (!online)
85 ap_flush_queue(zdev->ap_dev);
86 return count;
87}
88
89static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
90
91static struct attribute * zcrypt_device_attrs[] = {
92 &dev_attr_type.attr,
93 &dev_attr_online.attr,
94 NULL,
95};
96
97static struct attribute_group zcrypt_device_attr_group = {
98 .attrs = zcrypt_device_attrs,
99};
100
101/**
102 * Move the device towards the head of the device list.
103 * Needs to be called while holding the zcrypt device list lock.
104 * Note: cards with speed_rating of 0 are kept at the end of the list.
105 */
106static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
107{
108 struct zcrypt_device *tmp;
109 struct list_head *l;
110
111 if (zdev->speed_rating == 0)
112 return;
113 for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
114 tmp = list_entry(l, struct zcrypt_device, list);
115 if ((tmp->request_count + 1) * tmp->speed_rating <=
116 (zdev->request_count + 1) * zdev->speed_rating &&
117 tmp->speed_rating != 0)
118 break;
119 }
120 if (l == zdev->list.prev)
121 return;
122 /* Move zdev behind l */
123 list_del(&zdev->list);
124 list_add(&zdev->list, l);
125}
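The walk above weights each card by (request_count + 1) * speed_rating: a busy or slow card gets a larger value, a card only moves ahead of neighbours whose weighted value is larger, and cards with speed_rating == 0 stay at the tail. A minimal standalone sketch of that comparison (illustrative only, not part of the patch; the helper name is invented):

/*
 * Sketch: nonzero if a card with a_requests outstanding requests and
 * rating a_speed should sit ahead of one with b_requests/b_speed under
 * the weighting used by __zcrypt_increase_preference() above.
 */
static int zcrypt_sketch_prefer(int a_requests, int a_speed,
				int b_requests, int b_speed)
{
	if (a_speed == 0)		/* unrated cards stay at the tail */
		return 0;
	if (b_speed == 0)
		return 1;
	return (a_requests + 1) * a_speed < (b_requests + 1) * b_speed;
}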
126
127/**
128 * Move the device towards the tail of the device list.
129 * Needs to be called while holding the zcrypt device list lock.
130 * Note: cards with speed_rating of 0 are kept at the end of the list.
131 */
132static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
133{
134 struct zcrypt_device *tmp;
135 struct list_head *l;
136
137 if (zdev->speed_rating == 0)
138 return;
139 for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
140 tmp = list_entry(l, struct zcrypt_device, list);
141 if ((tmp->request_count + 1) * tmp->speed_rating >
142 (zdev->request_count + 1) * zdev->speed_rating ||
143 tmp->speed_rating == 0)
144 break;
145 }
146 if (l == zdev->list.next)
147 return;
148 /* Move zdev before l */
149 list_del(&zdev->list);
150 list_add_tail(&zdev->list, l);
151}
152
153static void zcrypt_device_release(struct kref *kref)
154{
155 struct zcrypt_device *zdev =
156 container_of(kref, struct zcrypt_device, refcount);
157 zcrypt_device_free(zdev);
158}
159
160void zcrypt_device_get(struct zcrypt_device *zdev)
161{
162 kref_get(&zdev->refcount);
163}
164EXPORT_SYMBOL(zcrypt_device_get);
165
166int zcrypt_device_put(struct zcrypt_device *zdev)
167{
168 return kref_put(&zdev->refcount, zcrypt_device_release);
169}
170EXPORT_SYMBOL(zcrypt_device_put);
171
172struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
173{
174 struct zcrypt_device *zdev;
175
176 zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
177 if (!zdev)
178 return NULL;
179 zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
180 if (!zdev->reply.message)
181 goto out_free;
182 zdev->reply.length = max_response_size;
183 spin_lock_init(&zdev->lock);
184 INIT_LIST_HEAD(&zdev->list);
185 return zdev;
186
187out_free:
188 kfree(zdev);
189 return NULL;
190}
191EXPORT_SYMBOL(zcrypt_device_alloc);
192
193void zcrypt_device_free(struct zcrypt_device *zdev)
194{
195 kfree(zdev->reply.message);
196 kfree(zdev);
197}
198EXPORT_SYMBOL(zcrypt_device_free);
199
200/**
201 * Register a crypto device.
202 */
203int zcrypt_device_register(struct zcrypt_device *zdev)
204{
205 int rc;
206
207 rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
208 &zcrypt_device_attr_group);
209 if (rc)
210 goto out;
211 get_device(&zdev->ap_dev->device);
212 kref_init(&zdev->refcount);
213 spin_lock_bh(&zcrypt_device_lock);
214 zdev->online = 1; /* New devices are online by default. */
215 list_add_tail(&zdev->list, &zcrypt_device_list);
216 __zcrypt_increase_preference(zdev);
217 zcrypt_device_count++;
218 spin_unlock_bh(&zcrypt_device_lock);
219out:
220 return rc;
221}
222EXPORT_SYMBOL(zcrypt_device_register);
223
224/**
225 * Unregister a crypto device.
226 */
227void zcrypt_device_unregister(struct zcrypt_device *zdev)
228{
229 spin_lock_bh(&zcrypt_device_lock);
230 zcrypt_device_count--;
231 list_del_init(&zdev->list);
232 spin_unlock_bh(&zcrypt_device_lock);
233 sysfs_remove_group(&zdev->ap_dev->device.kobj,
234 &zcrypt_device_attr_group);
235 put_device(&zdev->ap_dev->device);
236 zcrypt_device_put(zdev);
237}
238EXPORT_SYMBOL(zcrypt_device_unregister);
239
240/**
241 * zcrypt_read is not supported beyond zcrypt 1.3.1
242 */
243static ssize_t zcrypt_read(struct file *filp, char __user *buf,
244 size_t count, loff_t *f_pos)
245{
246 return -EPERM;
247}
248
249/**
250 * Write is not allowed
251 */
252static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
253 size_t count, loff_t *f_pos)
254{
255 return -EPERM;
256}
257
258/**
259 * Device open/close functions to count number of users.
260 */
261static int zcrypt_open(struct inode *inode, struct file *filp)
262{
263 atomic_inc(&zcrypt_open_count);
264 return 0;
265}
266
267static int zcrypt_release(struct inode *inode, struct file *filp)
268{
269 atomic_dec(&zcrypt_open_count);
270 return 0;
271}
272
273/**
274 * zcrypt ioctls.
275 */
276static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
277{
278 struct zcrypt_device *zdev;
279 int rc;
280
281 if (mex->outputdatalength < mex->inputdatalength)
282 return -EINVAL;
283 /**
284 * As long as outputdatalength is big enough, we can set the
285 * outputdatalength equal to the inputdatalength, since that is the
286 * number of bytes we will copy in any case
287 */
288 mex->outputdatalength = mex->inputdatalength;
289
290 spin_lock_bh(&zcrypt_device_lock);
291 list_for_each_entry(zdev, &zcrypt_device_list, list) {
292 if (!zdev->online ||
293 !zdev->ops->rsa_modexpo ||
294 zdev->min_mod_size > mex->inputdatalength ||
295 zdev->max_mod_size < mex->inputdatalength)
296 continue;
297 zcrypt_device_get(zdev);
298 get_device(&zdev->ap_dev->device);
299 zdev->request_count++;
300 __zcrypt_decrease_preference(zdev);
301 spin_unlock_bh(&zcrypt_device_lock);
302 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
303 rc = zdev->ops->rsa_modexpo(zdev, mex);
304 module_put(zdev->ap_dev->drv->driver.owner);
305 }
306 else
307 rc = -EAGAIN;
308 spin_lock_bh(&zcrypt_device_lock);
309 zdev->request_count--;
310 __zcrypt_increase_preference(zdev);
311 put_device(&zdev->ap_dev->device);
312 zcrypt_device_put(zdev);
313 spin_unlock_bh(&zcrypt_device_lock);
314 return rc;
315 }
316 spin_unlock_bh(&zcrypt_device_lock);
317 return -ENODEV;
318}
319
320static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
321{
322 struct zcrypt_device *zdev;
323 unsigned long long z1, z2, z3;
324 int rc, copied;
325
326 if (crt->outputdatalength < crt->inputdatalength ||
327 (crt->inputdatalength & 1))
328 return -EINVAL;
329 /**
330 * As long as outputdatalength is big enough, we can set the
331 * outputdatalength equal to the inputdatalength, since that is the
332 * number of bytes we will copy in any case
333 */
334 crt->outputdatalength = crt->inputdatalength;
335
336 copied = 0;
337 restart:
338 spin_lock_bh(&zcrypt_device_lock);
339 list_for_each_entry(zdev, &zcrypt_device_list, list) {
340 if (!zdev->online ||
341 !zdev->ops->rsa_modexpo_crt ||
342 zdev->min_mod_size > crt->inputdatalength ||
343 zdev->max_mod_size < crt->inputdatalength)
344 continue;
345 if (zdev->short_crt && crt->inputdatalength > 240) {
346 /**
347 * Check inputdata for leading zeros for cards
348 * that can't handle np_prime, bp_key, or
349 * u_mult_inv > 128 bytes.
350 */
351 if (copied == 0) {
352 int len;
353 spin_unlock_bh(&zcrypt_device_lock);
354 /* len is max 256 / 2 - 120 = 8 */
355 len = crt->inputdatalength / 2 - 120;
356 z1 = z2 = z3 = 0;
357 if (copy_from_user(&z1, crt->np_prime, len) ||
358 copy_from_user(&z2, crt->bp_key, len) ||
359 copy_from_user(&z3, crt->u_mult_inv, len))
360 return -EFAULT;
361 copied = 1;
362 /**
363 * We have to restart device lookup -
364 * the device list may have changed by now.
365 */
366 goto restart;
367 }
368 if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
369 /* The device can't handle this request. */
370 continue;
371 }
372 zcrypt_device_get(zdev);
373 get_device(&zdev->ap_dev->device);
374 zdev->request_count++;
375 __zcrypt_decrease_preference(zdev);
376 spin_unlock_bh(&zcrypt_device_lock);
377 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
378 rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
379 module_put(zdev->ap_dev->drv->driver.owner);
380 }
381 else
382 rc = -EAGAIN;
383 spin_lock_bh(&zcrypt_device_lock);
384 zdev->request_count--;
385 __zcrypt_increase_preference(zdev);
386 put_device(&zdev->ap_dev->device);
387 zcrypt_device_put(zdev);
388 spin_unlock_bh(&zcrypt_device_lock);
389 return rc;
390 }
391 spin_unlock_bh(&zcrypt_device_lock);
392 return -ENODEV;
393}
394
395static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
396{
397 struct zcrypt_device *zdev;
398 int rc;
399
400 spin_lock_bh(&zcrypt_device_lock);
401 list_for_each_entry(zdev, &zcrypt_device_list, list) {
402 if (!zdev->online || !zdev->ops->send_cprb ||
403 (xcRB->user_defined != AUTOSELECT &&
404 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)
405 )
406 continue;
407 zcrypt_device_get(zdev);
408 get_device(&zdev->ap_dev->device);
409 zdev->request_count++;
410 __zcrypt_decrease_preference(zdev);
411 spin_unlock_bh(&zcrypt_device_lock);
412 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
413 rc = zdev->ops->send_cprb(zdev, xcRB);
414 module_put(zdev->ap_dev->drv->driver.owner);
415 }
416 else
417 rc = -EAGAIN;
418 spin_lock_bh(&zcrypt_device_lock);
419 zdev->request_count--;
420 __zcrypt_increase_preference(zdev);
421 put_device(&zdev->ap_dev->device);
422 zcrypt_device_put(zdev);
423 spin_unlock_bh(&zcrypt_device_lock);
424 return rc;
425 }
426 spin_unlock_bh(&zcrypt_device_lock);
427 return -ENODEV;
428}
429
430static void zcrypt_status_mask(char status[AP_DEVICES])
431{
432 struct zcrypt_device *zdev;
433
434 memset(status, 0, sizeof(char) * AP_DEVICES);
435 spin_lock_bh(&zcrypt_device_lock);
436 list_for_each_entry(zdev, &zcrypt_device_list, list)
437 status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
438 zdev->online ? zdev->user_space_type : 0x0d;
439 spin_unlock_bh(&zcrypt_device_lock);
440}
441
442static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
443{
444 struct zcrypt_device *zdev;
445
446 memset(qdepth, 0, sizeof(char) * AP_DEVICES);
447 spin_lock_bh(&zcrypt_device_lock);
448 list_for_each_entry(zdev, &zcrypt_device_list, list) {
449 spin_lock(&zdev->ap_dev->lock);
450 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
451 zdev->ap_dev->pendingq_count +
452 zdev->ap_dev->requestq_count;
453 spin_unlock(&zdev->ap_dev->lock);
454 }
455 spin_unlock_bh(&zcrypt_device_lock);
456}
457
458static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
459{
460 struct zcrypt_device *zdev;
461
462 memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
463 spin_lock_bh(&zcrypt_device_lock);
464 list_for_each_entry(zdev, &zcrypt_device_list, list) {
465 spin_lock(&zdev->ap_dev->lock);
466 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
467 zdev->ap_dev->total_request_count;
468 spin_unlock(&zdev->ap_dev->lock);
469 }
470 spin_unlock_bh(&zcrypt_device_lock);
471}
472
473static int zcrypt_pendingq_count(void)
474{
475 struct zcrypt_device *zdev;
476 int pendingq_count = 0;
477
478 spin_lock_bh(&zcrypt_device_lock);
479 list_for_each_entry(zdev, &zcrypt_device_list, list) {
480 spin_lock(&zdev->ap_dev->lock);
481 pendingq_count += zdev->ap_dev->pendingq_count;
482 spin_unlock(&zdev->ap_dev->lock);
483 }
484 spin_unlock_bh(&zcrypt_device_lock);
485 return pendingq_count;
486}
487
488static int zcrypt_requestq_count(void)
489{
490 struct zcrypt_device *zdev;
491 int requestq_count = 0;
492
493 spin_lock_bh(&zcrypt_device_lock);
494 list_for_each_entry(zdev, &zcrypt_device_list, list) {
495 spin_lock(&zdev->ap_dev->lock);
496 requestq_count += zdev->ap_dev->requestq_count;
497 spin_unlock(&zdev->ap_dev->lock);
498 }
499 spin_unlock_bh(&zcrypt_device_lock);
500 return requestq_count;
501}
502
503static int zcrypt_count_type(int type)
504{
505 struct zcrypt_device *zdev;
506 int device_count = 0;
507
508 spin_lock_bh(&zcrypt_device_lock);
509 list_for_each_entry(zdev, &zcrypt_device_list, list)
510 if (zdev->user_space_type == type)
511 device_count++;
512 spin_unlock_bh(&zcrypt_device_lock);
513 return device_count;
514}
515
516/**
517 * Old, deprecated combi status call.
518 */
519static long zcrypt_ica_status(struct file *filp, unsigned long arg)
520{
521 struct ica_z90_status *pstat;
522 int ret;
523
524 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
525 if (!pstat)
526 return -ENOMEM;
527 pstat->totalcount = zcrypt_device_count;
528 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
529 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
530 pstat->requestqWaitCount = zcrypt_requestq_count();
531 pstat->pendingqWaitCount = zcrypt_pendingq_count();
532 pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
533 pstat->cryptoDomain = ap_domain_index;
534 zcrypt_status_mask(pstat->status);
535 zcrypt_qdepth_mask(pstat->qdepth);
536 ret = 0;
537 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
538 ret = -EFAULT;
539 kfree(pstat);
540 return ret;
541}
542
543static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
544 unsigned long arg)
545{
546 int rc;
547
548 switch (cmd) {
549 case ICARSAMODEXPO: {
550 struct ica_rsa_modexpo __user *umex = (void __user *) arg;
551 struct ica_rsa_modexpo mex;
552 if (copy_from_user(&mex, umex, sizeof(mex)))
553 return -EFAULT;
554 do {
555 rc = zcrypt_rsa_modexpo(&mex);
556 } while (rc == -EAGAIN);
557 if (rc)
558 return rc;
559 return put_user(mex.outputdatalength, &umex->outputdatalength);
560 }
561 case ICARSACRT: {
562 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
563 struct ica_rsa_modexpo_crt crt;
564 if (copy_from_user(&crt, ucrt, sizeof(crt)))
565 return -EFAULT;
566 do {
567 rc = zcrypt_rsa_crt(&crt);
568 } while (rc == -EAGAIN);
569 if (rc)
570 return rc;
571 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
572 }
573 case ZSECSENDCPRB: {
574 struct ica_xcRB __user *uxcRB = (void __user *) arg;
575 struct ica_xcRB xcRB;
576 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
577 return -EFAULT;
578 do {
579 rc = zcrypt_send_cprb(&xcRB);
580 } while (rc == -EAGAIN);
581 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
582 return -EFAULT;
583 return rc;
584 }
585 case Z90STAT_STATUS_MASK: {
586 char status[AP_DEVICES];
587 zcrypt_status_mask(status);
588 if (copy_to_user((char __user *) arg, status,
589 sizeof(char) * AP_DEVICES))
590 return -EFAULT;
591 return 0;
592 }
593 case Z90STAT_QDEPTH_MASK: {
594 char qdepth[AP_DEVICES];
595 zcrypt_qdepth_mask(qdepth);
596 if (copy_to_user((char __user *) arg, qdepth,
597 sizeof(char) * AP_DEVICES))
598 return -EFAULT;
599 return 0;
600 }
601 case Z90STAT_PERDEV_REQCNT: {
602 int reqcnt[AP_DEVICES];
603 zcrypt_perdev_reqcnt(reqcnt);
604 if (copy_to_user((int __user *) arg, reqcnt,
605 sizeof(int) * AP_DEVICES))
606 return -EFAULT;
607 return 0;
608 }
609 case Z90STAT_REQUESTQ_COUNT:
610 return put_user(zcrypt_requestq_count(), (int __user *) arg);
611 case Z90STAT_PENDINGQ_COUNT:
612 return put_user(zcrypt_pendingq_count(), (int __user *) arg);
613 case Z90STAT_TOTALOPEN_COUNT:
614 return put_user(atomic_read(&zcrypt_open_count),
615 (int __user *) arg);
616 case Z90STAT_DOMAIN_INDEX:
617 return put_user(ap_domain_index, (int __user *) arg);
618 /**
619 * Deprecated ioctls. Don't add another device count ioctl;
620 * user space can count the devices itself from the
621 * output of the Z90STAT_STATUS_MASK ioctl.
622 */
623 case ICAZ90STATUS:
624 return zcrypt_ica_status(filp, arg);
625 case Z90STAT_TOTALCOUNT:
626 return put_user(zcrypt_device_count, (int __user *) arg);
627 case Z90STAT_PCICACOUNT:
628 return put_user(zcrypt_count_type(ZCRYPT_PCICA),
629 (int __user *) arg);
630 case Z90STAT_PCICCCOUNT:
631 return put_user(zcrypt_count_type(ZCRYPT_PCICC),
632 (int __user *) arg);
633 case Z90STAT_PCIXCCMCL2COUNT:
634 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
635 (int __user *) arg);
636 case Z90STAT_PCIXCCMCL3COUNT:
637 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
638 (int __user *) arg);
639 case Z90STAT_PCIXCCCOUNT:
640 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
641 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
642 (int __user *) arg);
643 case Z90STAT_CEX2CCOUNT:
644 return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
645 (int __user *) arg);
646 case Z90STAT_CEX2ACOUNT:
647 return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
648 (int __user *) arg);
649 default:
650 /* unknown ioctl number */
651 return -ENOIOCTLCMD;
652 }
653}
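Together with the misc device registered later in this file, the dispatcher above is the entire user-space interface. A hedged user-space sketch of the ICARSAMODEXPO path follows; it assumes struct ica_rsa_modexpo and the ioctl number come from <asm/zcrypt.h> (as included by zcrypt_api.h) and that the misc device shows up as /dev/z90crypt, and the key material and buffer sizes are placeholders:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>		/* struct ica_rsa_modexpo, ICARSAMODEXPO */

int main(void)
{
	char in[128], out[128], exponent[128], modulus[128];
	struct ica_rsa_modexpo mex;
	int fd, rc;

	/* Fill in, exponent and modulus with real big-endian RSA data. */
	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return 1;
	mex.inputdata = in;
	mex.inputdatalength = sizeof(in);
	mex.outputdata = out;
	mex.outputdatalength = sizeof(out);
	mex.b_key = exponent;
	mex.n_modulus = modulus;
	rc = ioctl(fd, ICARSAMODEXPO, &mex);	/* retried in-kernel on -EAGAIN */
	if (rc == 0)
		printf("modexpo done, %u output bytes\n", mex.outputdatalength);
	close(fd);
	return rc ? 1 : 0;
}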
654
655#ifdef CONFIG_COMPAT
656/**
657 * ioctl32 conversion routines
658 */
659struct compat_ica_rsa_modexpo {
660 compat_uptr_t inputdata;
661 unsigned int inputdatalength;
662 compat_uptr_t outputdata;
663 unsigned int outputdatalength;
664 compat_uptr_t b_key;
665 compat_uptr_t n_modulus;
666};
667
668static long trans_modexpo32(struct file *filp, unsigned int cmd,
669 unsigned long arg)
670{
671 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
672 struct compat_ica_rsa_modexpo mex32;
673 struct ica_rsa_modexpo mex64;
674 long rc;
675
676 if (copy_from_user(&mex32, umex32, sizeof(mex32)))
677 return -EFAULT;
678 mex64.inputdata = compat_ptr(mex32.inputdata);
679 mex64.inputdatalength = mex32.inputdatalength;
680 mex64.outputdata = compat_ptr(mex32.outputdata);
681 mex64.outputdatalength = mex32.outputdatalength;
682 mex64.b_key = compat_ptr(mex32.b_key);
683 mex64.n_modulus = compat_ptr(mex32.n_modulus);
684 do {
685 rc = zcrypt_rsa_modexpo(&mex64);
686 } while (rc == -EAGAIN);
687 if (!rc)
688 rc = put_user(mex64.outputdatalength,
689 &umex32->outputdatalength);
690 return rc;
691}
692
693struct compat_ica_rsa_modexpo_crt {
694 compat_uptr_t inputdata;
695 unsigned int inputdatalength;
696 compat_uptr_t outputdata;
697 unsigned int outputdatalength;
698 compat_uptr_t bp_key;
699 compat_uptr_t bq_key;
700 compat_uptr_t np_prime;
701 compat_uptr_t nq_prime;
702 compat_uptr_t u_mult_inv;
703};
704
705static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
706 unsigned long arg)
707{
708 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
709 struct compat_ica_rsa_modexpo_crt crt32;
710 struct ica_rsa_modexpo_crt crt64;
711 long rc;
712
713 if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
714 return -EFAULT;
715 crt64.inputdata = compat_ptr(crt32.inputdata);
716 crt64.inputdatalength = crt32.inputdatalength;
717 crt64.outputdata= compat_ptr(crt32.outputdata);
718 crt64.outputdatalength = crt32.outputdatalength;
719 crt64.bp_key = compat_ptr(crt32.bp_key);
720 crt64.bq_key = compat_ptr(crt32.bq_key);
721 crt64.np_prime = compat_ptr(crt32.np_prime);
722 crt64.nq_prime = compat_ptr(crt32.nq_prime);
723 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
724 do {
725 rc = zcrypt_rsa_crt(&crt64);
726 } while (rc == -EAGAIN);
727 if (!rc)
728 rc = put_user(crt64.outputdatalength,
729 &ucrt32->outputdatalength);
730 return rc;
731}
732
733struct compat_ica_xcRB {
734 unsigned short agent_ID;
735 unsigned int user_defined;
736 unsigned short request_ID;
737 unsigned int request_control_blk_length;
738 unsigned char padding1[16 - sizeof (compat_uptr_t)];
739 compat_uptr_t request_control_blk_addr;
740 unsigned int request_data_length;
741 char padding2[16 - sizeof (compat_uptr_t)];
742 compat_uptr_t request_data_address;
743 unsigned int reply_control_blk_length;
744 char padding3[16 - sizeof (compat_uptr_t)];
745 compat_uptr_t reply_control_blk_addr;
746 unsigned int reply_data_length;
747 char padding4[16 - sizeof (compat_uptr_t)];
748 compat_uptr_t reply_data_addr;
749 unsigned short priority_window;
750 unsigned int status;
751} __attribute__((packed));
752
753static long trans_xcRB32(struct file *filp, unsigned int cmd,
754 unsigned long arg)
755{
756 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
757 struct compat_ica_xcRB xcRB32;
758 struct ica_xcRB xcRB64;
759 long rc;
760
761 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
762 return -EFAULT;
763 xcRB64.agent_ID = xcRB32.agent_ID;
764 xcRB64.user_defined = xcRB32.user_defined;
765 xcRB64.request_ID = xcRB32.request_ID;
766 xcRB64.request_control_blk_length =
767 xcRB32.request_control_blk_length;
768 xcRB64.request_control_blk_addr =
769 compat_ptr(xcRB32.request_control_blk_addr);
770 xcRB64.request_data_length =
771 xcRB32.request_data_length;
772 xcRB64.request_data_address =
773 compat_ptr(xcRB32.request_data_address);
774 xcRB64.reply_control_blk_length =
775 xcRB32.reply_control_blk_length;
776 xcRB64.reply_control_blk_addr =
777 compat_ptr(xcRB32.reply_control_blk_addr);
778 xcRB64.reply_data_length = xcRB32.reply_data_length;
779 xcRB64.reply_data_addr =
780 compat_ptr(xcRB32.reply_data_addr);
781 xcRB64.priority_window = xcRB32.priority_window;
782 xcRB64.status = xcRB32.status;
783 do {
784 rc = zcrypt_send_cprb(&xcRB64);
785 } while (rc == -EAGAIN);
786 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
787 xcRB32.reply_data_length = xcRB64.reply_data_length;
788 xcRB32.status = xcRB64.status;
789 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
790 return -EFAULT;
791 return rc;
792}
793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg)
796{
797 if (cmd == ICARSAMODEXPO)
798 return trans_modexpo32(filp, cmd, arg);
799 if (cmd == ICARSACRT)
800 return trans_modexpo_crt32(filp, cmd, arg);
801 if (cmd == ZSECSENDCPRB)
802 return trans_xcRB32(filp, cmd, arg);
803 return zcrypt_unlocked_ioctl(filp, cmd, arg);
804}
805#endif
806
807/**
808 * Misc device file operations.
809 */
810static struct file_operations zcrypt_fops = {
811 .owner = THIS_MODULE,
812 .read = zcrypt_read,
813 .write = zcrypt_write,
814 .unlocked_ioctl = zcrypt_unlocked_ioctl,
815#ifdef CONFIG_COMPAT
816 .compat_ioctl = zcrypt_compat_ioctl,
817#endif
818 .open = zcrypt_open,
819 .release = zcrypt_release
820};
821
822/**
823 * Misc device.
824 */
825static struct miscdevice zcrypt_misc_device = {
826 .minor = MISC_DYNAMIC_MINOR,
827 .name = "z90crypt",
828 .fops = &zcrypt_fops,
829};
830
831/**
832 * Deprecated /proc entry support.
833 */
834static struct proc_dir_entry *zcrypt_entry;
835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len)
838{
839 int hl, i;
840
841 hl = 0;
842 for (i = 0; i < len; i++)
843 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]);
844 hl += sprintf(outaddr+hl, " ");
845 return hl;
846}
847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len)
850{
851 int hl, inl, c, cx;
852
853 hl = sprintf(outaddr, " ");
854 inl = 0;
855 for (c = 0; c < (len / 16); c++) {
856 hl += sprintcl(outaddr+hl, addr+inl, 16);
857 inl += 16;
858 }
859 cx = len%16;
860 if (cx) {
861 hl += sprintcl(outaddr+hl, addr+inl, cx);
862 inl += cx;
863 }
864 hl += sprintf(outaddr+hl, "\n");
865 return hl;
866}
867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len)
870{
871 int hl, inl, r, rx;
872
873 hl = sprintf(outaddr, "\n%s\n", title);
874 inl = 0;
875 for (r = 0; r < (len / 64); r++) {
876 hl += sprintrw(outaddr+hl, addr+inl, 64);
877 inl += 64;
878 }
879 rx = len % 64;
880 if (rx) {
881 hl += sprintrw(outaddr+hl, addr+inl, rx);
882 inl += rx;
883 }
884 hl += sprintf(outaddr+hl, "\n");
885 return hl;
886}
887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len)
890{
891 int hl, r;
892
893 hl = sprintf(outaddr, "\n%s\n", title);
894 for (r = 0; r < len; r++) {
895 if ((r % 8) == 0)
896 hl += sprintf(outaddr+hl, " ");
897 hl += sprintf(outaddr+hl, "%08X ", array[r]);
898 if ((r % 8) == 7)
899 hl += sprintf(outaddr+hl, "\n");
900 }
901 hl += sprintf(outaddr+hl, "\n");
902 return hl;
903}
904
905static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
906 int count, int *eof, void *data)
907{
908 unsigned char *workarea;
909 int len;
910
911 len = 0;
912
913 /* resp_buff is a page. Use the right half for a work area */
914 workarea = resp_buff + 2000;
915 len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n",
916 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
917 len += sprintf(resp_buff + len, "Cryptographic domain: %d\n",
918 ap_domain_index);
919 len += sprintf(resp_buff + len, "Total device count: %d\n",
920 zcrypt_device_count);
921 len += sprintf(resp_buff + len, "PCICA count: %d\n",
922 zcrypt_count_type(ZCRYPT_PCICA));
923 len += sprintf(resp_buff + len, "PCICC count: %d\n",
924 zcrypt_count_type(ZCRYPT_PCICC));
925 len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n",
926 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
927 len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n",
928 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
929 len += sprintf(resp_buff + len, "CEX2C count: %d\n",
930 zcrypt_count_type(ZCRYPT_CEX2C));
931 len += sprintf(resp_buff + len, "CEX2A count: %d\n",
932 zcrypt_count_type(ZCRYPT_CEX2A));
933 len += sprintf(resp_buff + len, "requestq count: %d\n",
934 zcrypt_requestq_count());
935 len += sprintf(resp_buff + len, "pendingq count: %d\n",
936 zcrypt_pendingq_count());
937 len += sprintf(resp_buff + len, "Total open handles: %d\n\n",
938 atomic_read(&zcrypt_open_count));
939 zcrypt_status_mask(workarea);
940 len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
941 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
942 resp_buff+len, workarea, AP_DEVICES);
943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts",
948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
949 *eof = 1;
950 memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int));
951 return len;
952}
953
954static void zcrypt_disable_card(int index)
955{
956 struct zcrypt_device *zdev;
957
958 spin_lock_bh(&zcrypt_device_lock);
959 list_for_each_entry(zdev, &zcrypt_device_list, list)
960 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
961 zdev->online = 0;
962 ap_flush_queue(zdev->ap_dev);
963 break;
964 }
965 spin_unlock_bh(&zcrypt_device_lock);
966}
967
968static void zcrypt_enable_card(int index)
969{
970 struct zcrypt_device *zdev;
971
972 spin_lock_bh(&zcrypt_device_lock);
973 list_for_each_entry(zdev, &zcrypt_device_list, list)
974 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
975 zdev->online = 1;
976 break;
977 }
978 spin_unlock_bh(&zcrypt_device_lock);
979}
980
981static int zcrypt_status_write(struct file *file, const char __user *buffer,
982 unsigned long count, void *data)
983{
984 unsigned char *lbuf, *ptr;
985 unsigned long local_count;
986 int j;
987
988 if (count <= 0)
989 return 0;
990
991#define LBUFSIZE 1200UL
992 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
993 if (!lbuf) {
994 PRINTK("kmalloc failed!\n");
995 return 0;
996 }
997
998 local_count = min(LBUFSIZE - 1, count);
999 if (copy_from_user(lbuf, buffer, local_count) != 0) {
1000 kfree(lbuf);
1001 return -EFAULT;
1002 }
1003 lbuf[local_count] = '\0';
1004
1005 ptr = strstr(lbuf, "Online devices");
1006 if (!ptr) {
1007 PRINTK("Unable to parse data (missing \"Online devices\")\n");
1008 goto out;
1009 }
1010 ptr = strstr(ptr, "\n");
1011 if (!ptr) {
1012 PRINTK("Unable to parse data (missing newline "
1013 "after \"Online devices\")\n");
1014 goto out;
1015 }
1016 ptr++;
1017
1018 if (strstr(ptr, "Waiting work element counts") == NULL) {
1019 PRINTK("Unable to parse data (missing "
1020 "\"Waiting work element counts\")\n");
1021 goto out;
1022 }
1023
1024 for (j = 0; j < 64 && *ptr; ptr++) {
1025 /**
1026 * '0' for no device, '1' for PCICA, '2' for PCICC,
1027 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1028 * '5' for CEX2C and '6' for CEX2A
1029 */
1030 if (*ptr >= '0' && *ptr <= '6')
1031 j++;
1032 else if (*ptr == 'd' || *ptr == 'D')
1033 zcrypt_disable_card(j++);
1034 else if (*ptr == 'e' || *ptr == 'E')
1035 zcrypt_enable_card(j++);
1036 else if (*ptr != ' ' && *ptr != '\t')
1037 break;
1038 }
1039out:
1040 kfree(lbuf);
1041 return count;
1042}
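In other words, the write handler expects an edited copy of what the read handler above produces: locate the 64-character "Online devices" map, keep the digits for cards that should be left alone, and overwrite a card's position with 'd'/'D' to take it offline or 'e'/'E' to bring it back; any other character besides blanks and tabs stops the scan. For example, with an invented map that starts "1 1 5 0", writing the same text back with the third entry changed to 'd' takes card 2 offline.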
1043
1044/**
1045 * The module initialization code.
1046 */
1047int __init zcrypt_api_init(void)
1048{
1049 int rc;
1050
1051 /* Register the request sprayer. */
1052 rc = misc_register(&zcrypt_misc_device);
1053 if (rc < 0) {
1054 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
1055 zcrypt_misc_device.minor, rc);
1056 goto out;
1057 }
1058
1059 /* Set up the proc file system */
1060 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL);
1061 if (!zcrypt_entry) {
1062 PRINTK("Couldn't create z90crypt proc entry\n");
1063 rc = -ENOMEM;
1064 goto out_misc;
1065 }
1066 zcrypt_entry->nlink = 1;
1067 zcrypt_entry->data = NULL;
1068 zcrypt_entry->read_proc = zcrypt_status_read;
1069 zcrypt_entry->write_proc = zcrypt_status_write;
1070
1071 return 0;
1072
1073out_misc:
1074 misc_deregister(&zcrypt_misc_device);
1075out:
1076 return rc;
1077}
1078
1079/**
1080 * The module termination code.
1081 */
1082void zcrypt_api_exit(void)
1083{
1084 remove_proc_entry("driver/z90crypt", NULL);
1085 misc_deregister(&zcrypt_misc_device);
1086}
1087
1088#ifndef CONFIG_ZCRYPT_MONOLITHIC
1089module_init(zcrypt_api_init);
1090module_exit(zcrypt_api_exit);
1091#endif
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
new file mode 100644
index 000000000000..de4877ee618f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -0,0 +1,141 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_api.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 * Cornelia Huck <cornelia.huck@de.ibm.com>
10 *
11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
13 * Ralph Wuerthner <rwuerthn@de.ibm.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2, or (at your option)
18 * any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 */
29
30#ifndef _ZCRYPT_API_H_
31#define _ZCRYPT_API_H_
32
33/**
34 * Macro definitions
35 *
36 * PDEBUG debugs in the form "zcrypt: function_name -> message"
37 *
38 * PRINTK is like PDEBUG, except that it is always enabled
39 * PRINTKN is like PRINTK, except that it does not include the function name
40 * PRINTKW is like PRINTK, except that it uses KERN_WARNING
41 * PRINTKC is like PRINTK, except that it uses KERN_CRIT
42 */
43#define DEV_NAME "zcrypt"
44
45#define PRINTK(fmt, args...) \
46 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
47#define PRINTKN(fmt, args...) \
48 printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
49#define PRINTKW(fmt, args...) \
50 printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
51#define PRINTKC(fmt, args...) \
52 printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
53
54#ifdef ZCRYPT_DEBUG
55#define PDEBUG(fmt, args...) \
56 printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
57#else
58#define PDEBUG(fmt, args...) do {} while (0)
59#endif
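For reference, a brief illustration of what these macros produce at a call site (the numeric values below are invented; only the "zcrypt: function -> message" shape follows from the definitions above): a call such as

	PRINTKW("misc_register (minor %d) failed with %d\n", minor, rc);

made from inside zcrypt_api_init() expands to a printk() at KERN_WARNING and logs something like

	zcrypt: zcrypt_api_init -> misc_register (minor 10) failed with -16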
60
61#include "ap_bus.h"
62#include <asm/zcrypt.h>
63
64/* deprecated status calls */
65#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
66#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
67
68/**
69 * This structure is deprecated and the corresponding ioctl() has been
70 * replaced with individual ioctl()s for each piece of data!
71 */
72struct ica_z90_status {
73 int totalcount;
74 int leedslitecount; // PCICA
75 int leeds2count; // PCICC
76 // int PCIXCCCount; is not in struct for backward compatibility
77 int requestqWaitCount;
78 int pendingqWaitCount;
79 int totalOpenCount;
80 int cryptoDomain;
81 // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
82 // 5=CEX2C
83 unsigned char status[64];
84 // qdepth: # work elements waiting for each device
85 unsigned char qdepth[64];
86};
87
88/**
89 * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
90 * PCIXCC_MCL3, CEX2C, or CEX2A
91 *
92 * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed
93 * Internal Code (LIC) (EC J12220 level 29).
94 * PCIXCC_MCL2 refers to any LIC before this level.
95 */
96#define ZCRYPT_PCICA 1
97#define ZCRYPT_PCICC 2
98#define ZCRYPT_PCIXCC_MCL2 3
99#define ZCRYPT_PCIXCC_MCL3 4
100#define ZCRYPT_CEX2C 5
101#define ZCRYPT_CEX2A 6
102
103struct zcrypt_device;
104
105struct zcrypt_ops {
106 long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
107 long (*rsa_modexpo_crt)(struct zcrypt_device *,
108 struct ica_rsa_modexpo_crt *);
109 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
110};
111
112struct zcrypt_device {
113 struct list_head list; /* Device list. */
114 spinlock_t lock; /* Per device lock. */
115 struct kref refcount; /* device refcounting */
116 struct ap_device *ap_dev; /* The "real" ap device. */
117 struct zcrypt_ops *ops; /* Crypto operations. */
118 int online; /* User online/offline */
119
120 int user_space_type; /* User space device id. */
121 char *type_string; /* User space device name. */
122 int min_mod_size; /* Min number of bits. */
123 int max_mod_size; /* Max number of bits. */
124 int short_crt; /* Card has crt length restriction. */
125 int speed_rating; /* Speed of the crypto device. */
126
127 int request_count; /* # current requests. */
128
129 struct ap_message reply; /* Per-device reply structure. */
130};
131
132struct zcrypt_device *zcrypt_device_alloc(size_t);
133void zcrypt_device_free(struct zcrypt_device *);
134void zcrypt_device_get(struct zcrypt_device *);
135int zcrypt_device_put(struct zcrypt_device *);
136int zcrypt_device_register(struct zcrypt_device *);
137void zcrypt_device_unregister(struct zcrypt_device *);
138int zcrypt_api_init(void);
139void zcrypt_api_exit(void);
140
141#endif /* _ZCRYPT_API_H_ */
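A hedged sketch of how one of the card drivers produced by this split might use the interface above from its AP bus probe path; the callback body, response size, modulus limits and speed rating are placeholders, while the types, ZCRYPT_CEX2A and the alloc/register calls come from this header:

#include "zcrypt_api.h"

/* Placeholder callback: a real driver builds and sends an AP message here. */
static long sketch_rsa_modexpo(struct zcrypt_device *zdev,
			       struct ica_rsa_modexpo *mex)
{
	return -ENODEV;
}

static struct zcrypt_ops sketch_ops = {
	.rsa_modexpo = sketch_rsa_modexpo,
};

static int sketch_probe(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev;
	int rc;

	zdev = zcrypt_device_alloc(4096);	/* placeholder response size */
	if (!zdev)
		return -ENOMEM;
	zdev->ap_dev = ap_dev;
	zdev->ops = &sketch_ops;
	zdev->user_space_type = ZCRYPT_CEX2A;
	zdev->type_string = "CEX2A";
	zdev->min_mod_size = 64;		/* placeholder limits */
	zdev->max_mod_size = 256;
	zdev->speed_rating = 100;		/* placeholder rating */
	ap_dev->private = zdev;
	rc = zcrypt_device_register(zdev);
	if (rc) {
		ap_dev->private = NULL;
		zcrypt_device_free(zdev);
	}
	return rc;
}

The remove path would mirror this with zcrypt_device_unregister().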
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
new file mode 100644
index 000000000000..8dbcf0eef3e5
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -0,0 +1,350 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cca_key.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_CCA_KEY_H_
29#define _ZCRYPT_CCA_KEY_H_
30
31struct T6_keyBlock_hdr {
32 unsigned short blen;
33 unsigned short ulen;
34 unsigned short flags;
35};
36
37/**
38 * mapping for the cca private ME key token.
39 * Three parts of interest here: the header, the private section and
40 * the public section.
41 *
42 * mapping for the cca key token header
43 */
44struct cca_token_hdr {
45 unsigned char token_identifier;
46 unsigned char version;
47 unsigned short token_length;
48 unsigned char reserved[4];
49} __attribute__((packed));
50
51#define CCA_TKN_HDR_ID_EXT 0x1E
52
53/**
54 * mapping for the cca private ME section
55 */
56struct cca_private_ext_ME_sec {
57 unsigned char section_identifier;
58 unsigned char version;
59 unsigned short section_length;
60 unsigned char private_key_hash[20];
61 unsigned char reserved1[4];
62 unsigned char key_format;
63 unsigned char reserved2;
64 unsigned char key_name_hash[20];
65 unsigned char key_use_flags[4];
66 unsigned char reserved3[6];
67 unsigned char reserved4[24];
68 unsigned char confounder[24];
69 unsigned char exponent[128];
70 unsigned char modulus[128];
71} __attribute__((packed));
72
73#define CCA_PVT_USAGE_ALL 0x80
74
75/**
76 * mapping for the cca public section
77 * In a private key, the modulus doesn't appear in the public
78 * section. So, an arbitrary public exponent of 0x010001 will be
79 * used, giving a fixed section length of 0x0F.
80 */
81struct cca_public_sec {
82 unsigned char section_identifier;
83 unsigned char version;
84 unsigned short section_length;
85 unsigned char reserved[2];
86 unsigned short exponent_len;
87 unsigned short modulus_bit_len;
88 unsigned short modulus_byte_len; /* In a private key, this is 0 */
89} __attribute__((packed));
90
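The fixed 0x010001 exponent mentioned in the comment above is the common RSA public exponent 65537, stored as the three bytes { 0x01, 0x00, 0x01 } that the key builders below copy into the token. A minimal standalone sketch of that encoding (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	/* the fixed three-byte public exponent used by the key builders */
	unsigned char pk_exponent[3] = { 0x01, 0x00, 0x01 };
	unsigned long e = ((unsigned long) pk_exponent[0] << 16) |
			  ((unsigned long) pk_exponent[1] << 8) |
			  pk_exponent[2];

	printf("e = %lu\n", e);	/* prints 65537 */
	return 0;
}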
91/**
92 * mapping for the cca private CRT key 'token'
93 * The first three parts (the only parts considered in this release)
94 * are: the header, the private section and the public section.
95 * The header and public section are the same as for the
96 * struct cca_private_ext_ME
97 *
98 * Following the structure are the quantities p, q, dp, dq, u, pad,
99 * and modulus, in that order, where pad_len pads the sum of
100 * (p_len + q_len + dp_len + dq_len + u_len) up to the next
101 * multiple of 8.
102 */
103struct cca_pvt_ext_CRT_sec {
104 unsigned char section_identifier;
105 unsigned char version;
106 unsigned short section_length;
107 unsigned char private_key_hash[20];
108 unsigned char reserved1[4];
109 unsigned char key_format;
110 unsigned char reserved2;
111 unsigned char key_name_hash[20];
112 unsigned char key_use_flags[4];
113 unsigned short p_len;
114 unsigned short q_len;
115 unsigned short dp_len;
116 unsigned short dq_len;
117 unsigned short u_len;
118 unsigned short mod_len;
119 unsigned char reserved3[4];
120 unsigned short pad_len;
121 unsigned char reserved4[52];
122 unsigned char confounder[8];
123} __attribute__((packed));
124
125#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08
126#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
127
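The pad_len rule described in the layout comment above (pad the key parts up to a multiple of 8) is easiest to see with concrete numbers; zcrypt_type6_crt_key() further down does the same arithmetic. A standalone sketch, assuming a 2048-bit modulus (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	int inputdatalength = 256;		/* 2048-bit modulus, in bytes */
	int short_len = inputdatalength / 2;	/* q and dq: 128 bytes each */
	int long_len = short_len + 8;		/* p, dp and u: 136 bytes each */
	int parts = 3 * long_len + 2 * short_len;	/* 664 bytes of key parts */
	int pad_len = -parts & 7;		/* complement modulo 8: 0 here */
	int key_len = parts + pad_len + inputdatalength;

	printf("short=%d long=%d pad=%d key_len=%d\n",
	       short_len, long_len, pad_len, key_len);
	return 0;
}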
128/**
129 * Set up private key fields of a type6 MEX message.
130 * Note that all numerics in the key token are big-endian,
131 * while the entries in the key block header are little-endian.
132 *
133 * @mex: pointer to user input data
134 * @p: pointer to memory area for the key
135 *
136 * Returns the size of the key area or -EFAULT
137 */
138static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
139 void *p, int big_endian)
140{
141 static struct cca_token_hdr static_pvt_me_hdr = {
142 .token_identifier = 0x1E,
143 .token_length = 0x0183,
144 };
145 static struct cca_private_ext_ME_sec static_pvt_me_sec = {
146 .section_identifier = 0x02,
147 .section_length = 0x016C,
148 .key_use_flags = {0x80,0x00,0x00,0x00},
149 };
150 static struct cca_public_sec static_pub_me_sec = {
151 .section_identifier = 0x04,
152 .section_length = 0x000F,
153 .exponent_len = 0x0003,
154 };
155 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
156 struct {
157 struct T6_keyBlock_hdr t6_hdr;
158 struct cca_token_hdr pvtMeHdr;
159 struct cca_private_ext_ME_sec pvtMeSec;
160 struct cca_public_sec pubMeSec;
161 char exponent[3];
162 } __attribute__((packed)) *key = p;
163 unsigned char *temp;
164
165 memset(key, 0, sizeof(*key));
166
167 if (big_endian) {
168 key->t6_hdr.blen = cpu_to_be16(0x189);
169 key->t6_hdr.ulen = cpu_to_be16(0x189 - 2);
170 } else {
171 key->t6_hdr.blen = cpu_to_le16(0x189);
172 key->t6_hdr.ulen = cpu_to_le16(0x189 - 2);
173 }
174 key->pvtMeHdr = static_pvt_me_hdr;
175 key->pvtMeSec = static_pvt_me_sec;
176 key->pubMeSec = static_pub_me_sec;
177 /**
178 * In a private key, the modulus doesn't appear in the public
179 * section. So, an arbitrary public exponent of 0x010001 will be
180 * used.
181 */
182 memcpy(key->exponent, pk_exponent, 3);
183
184 /* key parameter block */
185 temp = key->pvtMeSec.exponent +
186 sizeof(key->pvtMeSec.exponent) - mex->inputdatalength;
187 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
188 return -EFAULT;
189
190 /* modulus */
191 temp = key->pvtMeSec.modulus +
192 sizeof(key->pvtMeSec.modulus) - mex->inputdatalength;
193 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
194 return -EFAULT;
195 key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength;
196 return sizeof(*key);
197}
198
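The key builder above places the user-supplied big-endian numbers right-justified in fixed-size token fields, leaving the leading bytes zero (temp = field + sizeof(field) - inputdatalength). A standalone sketch of that convention, with hypothetical sizes and data (illustrative only, not driver code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char field[16] = { 0 };	/* stands in for a 128-byte key field */
	unsigned char user[4] = { 0xde, 0xad, 0xbe, 0xef };
	size_t len = sizeof(user);
	size_t i;

	/* copy to the end of the field; the leading bytes stay zero */
	memcpy(field + sizeof(field) - len, user, len);

	for (i = 0; i < sizeof(field); i++)
		printf("%02x", field[i]);
	printf("\n");	/* 000000000000000000000000deadbeef */
	return 0;
}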
199/**
200 * Set up public key fields of a type6 MEX message. Leading zeroes
201 * are stripped from the b_key (the public exponent).
202 * Note that all numerics in the key token are big-endian,
203 * while the entries in the key block header are little-endian.
204 *
205 * @mex: pointer to user input data
206 * @p: pointer to memory area for the key
207 *
208 * Returns the size of the key area or a negative error code (-EFAULT, -EINVAL)
209 */
210static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex,
211 void *p, int big_endian)
212{
213 static struct cca_token_hdr static_pub_hdr = {
214 .token_identifier = 0x1E,
215 };
216 static struct cca_public_sec static_pub_sec = {
217 .section_identifier = 0x04,
218 };
219 struct {
220 struct T6_keyBlock_hdr t6_hdr;
221 struct cca_token_hdr pubHdr;
222 struct cca_public_sec pubSec;
223 char exponent[0];
224 } __attribute__((packed)) *key = p;
225 unsigned char *temp;
226 int i;
227
228 memset(key, 0, sizeof(*key));
229
230 key->pubHdr = static_pub_hdr;
231 key->pubSec = static_pub_sec;
232
233 /* key parameter block */
234 temp = key->exponent;
235 if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
236 return -EFAULT;
237 /* Strip leading zeroes from b_key. */
238 for (i = 0; i < mex->inputdatalength; i++)
239 if (temp[i])
240 break;
241 if (i >= mex->inputdatalength)
242 return -EINVAL;
243 memmove(temp, temp + i, mex->inputdatalength - i);
244 temp += mex->inputdatalength - i;
245 /* modulus */
246 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
247 return -EFAULT;
248
249 key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
250 key->pubSec.modulus_byte_len = mex->inputdatalength;
251 key->pubSec.exponent_len = mex->inputdatalength - i;
252 key->pubSec.section_length = sizeof(key->pubSec) +
253 2*mex->inputdatalength - i;
254 key->pubHdr.token_length =
255 key->pubSec.section_length + sizeof(key->pubHdr);
256 if (big_endian) {
257 key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4);
258 key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6);
259 } else {
260 key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4);
261 key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6);
262 }
263 return sizeof(*key) + 2*mex->inputdatalength - i;
264}
265
266/**
267 * Set up private key fields of a type6 CRT message.
268 * Note that all numerics in the key token are big-endian,
269 * while the entries in the key block header are little-endian.
270 *
271 * @crt: pointer to user input data
272 * @p: pointer to memory area for the key
273 *
274 * Returns the size of the key area or -EFAULT
275 */
276static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
277 void *p, int big_endian)
278{
279 static struct cca_public_sec static_cca_pub_sec = {
280 .section_identifier = 4,
281 .section_length = 0x000f,
282 .exponent_len = 0x0003,
283 };
284 static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
285 struct {
286 struct T6_keyBlock_hdr t6_hdr;
287 struct cca_token_hdr token;
288 struct cca_pvt_ext_CRT_sec pvt;
289 char key_parts[0];
290 } __attribute__((packed)) *key = p;
291 struct cca_public_sec *pub;
292 int short_len, long_len, pad_len, key_len, size;
293
294 memset(key, 0, sizeof(*key));
295
296 short_len = crt->inputdatalength / 2;
297 long_len = short_len + 8;
298 pad_len = -(3*long_len + 2*short_len) & 7;
299 key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
300 size = sizeof(*key) + key_len + sizeof(*pub) + 3;
301
302 /* parameter block.key block */
303 if (big_endian) {
304 key->t6_hdr.blen = cpu_to_be16(size);
305 key->t6_hdr.ulen = cpu_to_be16(size - 2);
306 } else {
307 key->t6_hdr.blen = cpu_to_le16(size);
308 key->t6_hdr.ulen = cpu_to_le16(size - 2);
309 }
310
311 /* key token header */
312 key->token.token_identifier = CCA_TKN_HDR_ID_EXT;
313 key->token.token_length = size - 6;
314
315 /* private section */
316 key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT;
317 key->pvt.section_length = sizeof(key->pvt) + key_len;
318 key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL;
319 key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL;
320 key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len;
321 key->pvt.q_len = key->pvt.dq_len = short_len;
322 key->pvt.mod_len = crt->inputdatalength;
323 key->pvt.pad_len = pad_len;
324
325 /* key parts */
326 if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
327 copy_from_user(key->key_parts + long_len,
328 crt->nq_prime, short_len) ||
329 copy_from_user(key->key_parts + long_len + short_len,
330 crt->bp_key, long_len) ||
331 copy_from_user(key->key_parts + 2*long_len + short_len,
332 crt->bq_key, short_len) ||
333 copy_from_user(key->key_parts + 2*long_len + 2*short_len,
334 crt->u_mult_inv, long_len))
335 return -EFAULT;
336 memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
337 0xff, crt->inputdatalength);
338 pub = (struct cca_public_sec *)(key->key_parts + key_len);
339 *pub = static_cca_pub_sec;
340 pub->modulus_bit_len = 8 * crt->inputdatalength;
341 /**
342 * In a private key, the modulus doesn't appear in the public
343 * section. So, an arbitrary public exponent of 0x010001 will be
344 * used.
345 */
346 memcpy((char *) (pub + 1), pk_exponent, 3);
347 return size;
348}
349
350#endif /* _ZCRYPT_CCA_KEY_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
new file mode 100644
index 000000000000..a62b00083d0c
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -0,0 +1,435 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cex2a.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_cex2a.h"
39
40#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
41#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
42
43#define CEX2A_SPEED_RATING 970
44
45#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
46#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
47
48#define CEX2A_CLEANUP_TIME (15*HZ)
49
50static struct ap_device_id zcrypt_cex2a_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
52 { /* end of list */ },
53};
54
55#ifndef CONFIG_ZCRYPT_MONOLITHIC
56MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
57MODULE_AUTHOR("IBM Corporation");
58MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
59 "Copyright 2001, 2006 IBM Corporation");
60MODULE_LICENSE("GPL");
61#endif
62
63static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
64static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
65static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_cex2a_driver = {
69 .probe = zcrypt_cex2a_probe,
70 .remove = zcrypt_cex2a_remove,
71 .receive = zcrypt_cex2a_receive,
72 .ids = zcrypt_cex2a_ids,
73};
74
75/**
76 * Convert an ICAMEX message to a type50 MEX message.
77 *
78 * @zdev: crypto device pointer
79 * @ap_msg: pointer to the AP message to be filled in
80 * @mex: pointer to user input data
81 *
82 * Returns 0 on success or -EFAULT.
83 */
84static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
85 struct ap_message *ap_msg,
86 struct ica_rsa_modexpo *mex)
87{
88 unsigned char *mod, *exp, *inp;
89 int mod_len;
90
91 mod_len = mex->inputdatalength;
92
93 if (mod_len <= 128) {
94 struct type50_meb1_msg *meb1 = ap_msg->message;
95 memset(meb1, 0, sizeof(*meb1));
96 ap_msg->length = sizeof(*meb1);
97 meb1->header.msg_type_code = TYPE50_TYPE_CODE;
98 meb1->header.msg_len = sizeof(*meb1);
99 meb1->keyblock_type = TYPE50_MEB1_FMT;
100 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
101 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
102 inp = meb1->message + sizeof(meb1->message) - mod_len;
103 } else {
104 struct type50_meb2_msg *meb2 = ap_msg->message;
105 memset(meb2, 0, sizeof(*meb2));
106 ap_msg->length = sizeof(*meb2);
107 meb2->header.msg_type_code = TYPE50_TYPE_CODE;
108 meb2->header.msg_len = sizeof(*meb2);
109 meb2->keyblock_type = TYPE50_MEB2_FMT;
110 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
111 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
112 inp = meb2->message + sizeof(meb2->message) - mod_len;
113 }
114
115 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
116 copy_from_user(exp, mex->b_key, mod_len) ||
117 copy_from_user(inp, mex->inputdata, mod_len))
118 return -EFAULT;
119 return 0;
120}
121
122/**
123 * Convert an ICACRT message to a type50 CRT message.
124 *
125 * @zdev: crypto device pointer
126 * @ap_msg: pointer to the AP message to be filled in
127 * @crt: pointer to user input data
128 *
129 * Returns 0 on success or -EFAULT.
130 */
131static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
132 struct ap_message *ap_msg,
133 struct ica_rsa_modexpo_crt *crt)
134{
135 int mod_len, short_len, long_len, long_offset;
136 unsigned char *p, *q, *dp, *dq, *u, *inp;
137
138 mod_len = crt->inputdatalength;
139 short_len = mod_len / 2;
140 long_len = mod_len / 2 + 8;
141
142 /*
143 * CEX2A cannot handle p, dp, or U > 128 bytes.
144 * If we have one of these, we need to do extra checking.
145 */
146 if (long_len > 128) {
147 /*
148 * zcrypt_rsa_crt already checked for the leading
149 * zeroes of np_prime, bp_key and u_mult_inc.
150 */
151 long_offset = long_len - 128;
152 long_len = 128;
153 } else
154 long_offset = 0;
155
156 /*
157 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
158 * the larger message structure.
159 */
160 if (long_len <= 64) {
161 struct type50_crb1_msg *crb1 = ap_msg->message;
162 memset(crb1, 0, sizeof(*crb1));
163 ap_msg->length = sizeof(*crb1);
164 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
165 crb1->header.msg_len = sizeof(*crb1);
166 crb1->keyblock_type = TYPE50_CRB1_FMT;
167 p = crb1->p + sizeof(crb1->p) - long_len;
168 q = crb1->q + sizeof(crb1->q) - short_len;
169 dp = crb1->dp + sizeof(crb1->dp) - long_len;
170 dq = crb1->dq + sizeof(crb1->dq) - short_len;
171 u = crb1->u + sizeof(crb1->u) - long_len;
172 inp = crb1->message + sizeof(crb1->message) - mod_len;
173 } else {
174 struct type50_crb2_msg *crb2 = ap_msg->message;
175 memset(crb2, 0, sizeof(*crb2));
176 ap_msg->length = sizeof(*crb2);
177 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
178 crb2->header.msg_len = sizeof(*crb2);
179 crb2->keyblock_type = TYPE50_CRB2_FMT;
180 p = crb2->p + sizeof(crb2->p) - long_len;
181 q = crb2->q + sizeof(crb2->q) - short_len;
182 dp = crb2->dp + sizeof(crb2->dp) - long_len;
183 dq = crb2->dq + sizeof(crb2->dq) - short_len;
184 u = crb2->u + sizeof(crb2->u) - long_len;
185 inp = crb2->message + sizeof(crb2->message) - mod_len;
186 }
187
188 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
189 copy_from_user(q, crt->nq_prime, short_len) ||
190 copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
191 copy_from_user(dq, crt->bq_key, short_len) ||
192 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
193 copy_from_user(inp, crt->inputdata, mod_len))
194 return -EFAULT;
195
196
197 return 0;
198}
199
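For a 2048-bit modulus the "long" key parts are 136 bytes, but the type50 CRB2 fields hold only 128, so the function above skips the first long_offset bytes, which the API layer has already verified to be zero. The arithmetic, as a standalone sketch (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	int mod_len = 256;			/* 2048-bit modulus */
	int short_len = mod_len / 2;		/* 128 */
	int long_len = short_len + 8;		/* 136 */
	int long_offset = 0;

	if (long_len > 128) {			/* CEX2A field limit */
		long_offset = long_len - 128;	/* 8 leading (zero) bytes skipped */
		long_len = 128;
	}
	printf("short=%d long=%d offset=%d\n", short_len, long_len, long_offset);
	return 0;
}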
200/**
201 * Copy results from a type 80 reply message back to user space.
202 *
203 * @zdev: crypto device pointer
204 * @reply: reply AP message.
205 * @outputdata: pointer to user output data
206 * @outputdatalength: size of user output data
207 *
208 * Returns 0 on success, -EFAULT or -EAGAIN.
209 */
210static int convert_type80(struct zcrypt_device *zdev,
211 struct ap_message *reply,
212 char __user *outputdata,
213 unsigned int outputdatalength)
214{
215 struct type80_hdr *t80h = reply->message;
216 unsigned char *data;
217
218 if (t80h->len < sizeof(*t80h) + outputdatalength) {
219 /* The result is too short; the CEX2A card should never do that. */
220 zdev->online = 0;
221 return -EAGAIN; /* repeat the request on a different device. */
222 }
223 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
224 data = reply->message + t80h->len - outputdatalength;
225 if (copy_to_user(outputdata, data, outputdatalength))
226 return -EFAULT;
227 return 0;
228}
229
230static int convert_response(struct zcrypt_device *zdev,
231 struct ap_message *reply,
232 char __user *outputdata,
233 unsigned int outputdatalength)
234{
235 /* Response type byte is the second byte in the response. */
236 switch (((unsigned char *) reply->message)[1]) {
237 case TYPE82_RSP_CODE:
238 case TYPE88_RSP_CODE:
239 return convert_error(zdev, reply);
240 case TYPE80_RSP_CODE:
241 return convert_type80(zdev, reply,
242 outputdata, outputdatalength);
243 default: /* Unknown response type, this should NEVER EVER happen */
244 PRINTK("Unrecognized Message Header: %08x%08x\n",
245 *(unsigned int *) reply->message,
246 *(unsigned int *) (reply->message+4));
247 zdev->online = 0;
248 return -EAGAIN; /* repeat the request on a different device. */
249 }
250}
251
252/**
253 * This function is called from the AP bus code after a crypto request
254 * "msg" has finished with the reply message "reply".
255 * It is called from tasklet context.
256 * @ap_dev: pointer to the AP device
257 * @msg: pointer to the AP message
258 * @reply: pointer to the AP reply message
259 */
260static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
261 struct ap_message *msg,
262 struct ap_message *reply)
263{
264 static struct error_hdr error_reply = {
265 .type = TYPE82_RSP_CODE,
266 .reply_code = REP82_ERROR_MACHINE_FAILURE,
267 };
268 struct type80_hdr *t80h = reply->message;
269 int length;
270
271 /* Copy the reply message to the request message buffer. */
272 if (IS_ERR(reply))
273 memcpy(msg->message, &error_reply, sizeof(error_reply));
274 else if (t80h->type == TYPE80_RSP_CODE) {
275 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
276 memcpy(msg->message, reply->message, length);
277 } else
278 memcpy(msg->message, reply->message, sizeof error_reply);
279 complete((struct completion *) msg->private);
280}
281
282static atomic_t zcrypt_step = ATOMIC_INIT(0);
283
284/**
285 * The request distributor calls this function if it picked the CEX2A
286 * device to handle a modexpo request.
287 * @zdev: pointer to zcrypt_device structure that identifies the
288 * CEX2A device to the request distributor
289 * @mex: pointer to the modexpo request buffer
290 */
291static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
292 struct ica_rsa_modexpo *mex)
293{
294 struct ap_message ap_msg;
295 struct completion work;
296 int rc;
297
298 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
299 if (!ap_msg.message)
300 return -ENOMEM;
301 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
302 atomic_inc_return(&zcrypt_step);
303 ap_msg.private = &work;
304 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
305 if (rc)
306 goto out_free;
307 init_completion(&work);
308 ap_queue_message(zdev->ap_dev, &ap_msg);
309 rc = wait_for_completion_interruptible_timeout(
310 &work, CEX2A_CLEANUP_TIME);
311 if (rc > 0)
312 rc = convert_response(zdev, &ap_msg, mex->outputdata,
313 mex->outputdatalength);
314 else {
315 /* Signal pending or message timed out. */
316 ap_cancel_message(zdev->ap_dev, &ap_msg);
317 if (rc == 0)
318 /* Message timed out. */
319 rc = -ETIME;
320 }
321out_free:
322 kfree(ap_msg.message);
323 return rc;
324}
325
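The program-supplied message id (psmid) built above packs the caller's pid into the upper 32 bits and a global step counter into the lower 32 bits, so replies can be matched to their requests. A standalone sketch of the packing, with hypothetical values (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned long long pid = 4242;	/* hypothetical process id */
	unsigned int step = 7;		/* hypothetical per-request counter */
	unsigned long long psmid = (pid << 32) + step;

	printf("psmid = 0x%016llx\n", psmid);	/* 0x0000109200000007 */
	return 0;
}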
326/**
327 * The request distributor calls this function if it picked the CEX2A
328 * device to handle a modexpo_crt request.
329 * @zdev: pointer to zcrypt_device structure that identifies the
330 * CEX2A device to the request distributor
331 * @crt: pointer to the modexpo_crt request buffer
332 */
333static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
334 struct ica_rsa_modexpo_crt *crt)
335{
336 struct ap_message ap_msg;
337 struct completion work;
338 int rc;
339
340 ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
341 if (!ap_msg.message)
342 return -ENOMEM;
343 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
344 atomic_inc_return(&zcrypt_step);
345 ap_msg.private = &work;
346 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
347 if (rc)
348 goto out_free;
349 init_completion(&work);
350 ap_queue_message(zdev->ap_dev, &ap_msg);
351 rc = wait_for_completion_interruptible_timeout(
352 &work, CEX2A_CLEANUP_TIME);
353 if (rc > 0)
354 rc = convert_response(zdev, &ap_msg, crt->outputdata,
355 crt->outputdatalength);
356 else {
357 /* Signal pending or message timed out. */
358 ap_cancel_message(zdev->ap_dev, &ap_msg);
359 if (rc == 0)
360 /* Message timed out. */
361 rc = -ETIME;
362 }
363out_free:
364 kfree(ap_msg.message);
365 return rc;
366}
367
368/**
369 * The crypto operations for a CEX2A card.
370 */
371static struct zcrypt_ops zcrypt_cex2a_ops = {
372 .rsa_modexpo = zcrypt_cex2a_modexpo,
373 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
374};
375
376/**
377 * Probe function for CEX2A cards. It always accepts the AP device
378 * since the bus_match already checked the hardware type.
379 * @ap_dev: pointer to the AP device.
380 */
381static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
382{
383 struct zcrypt_device *zdev;
384 int rc;
385
386 zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
387 if (!zdev)
388 return -ENOMEM;
389 zdev->ap_dev = ap_dev;
390 zdev->ops = &zcrypt_cex2a_ops;
391 zdev->online = 1;
392 zdev->user_space_type = ZCRYPT_CEX2A;
393 zdev->type_string = "CEX2A";
394 zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
395 zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
396 zdev->short_crt = 1;
397 zdev->speed_rating = CEX2A_SPEED_RATING;
398 ap_dev->reply = &zdev->reply;
399 ap_dev->private = zdev;
400 rc = zcrypt_device_register(zdev);
401 if (rc)
402 goto out_free;
403 return 0;
404
405out_free:
406 ap_dev->private = NULL;
407 zcrypt_device_free(zdev);
408 return rc;
409}
410
411/**
412 * This is called to remove the extended CEX2A driver information
413 * when an AP device is removed.
414 */
415static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
416{
417 struct zcrypt_device *zdev = ap_dev->private;
418
419 zcrypt_device_unregister(zdev);
420}
421
422int __init zcrypt_cex2a_init(void)
423{
424 return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
425}
426
427void __exit zcrypt_cex2a_exit(void)
428{
429 ap_driver_unregister(&zcrypt_cex2a_driver);
430}
431
432#ifndef CONFIG_ZCRYPT_MONOLITHIC
433module_init(zcrypt_cex2a_init);
434module_exit(zcrypt_cex2a_exit);
435#endif
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
new file mode 100644
index 000000000000..8f69d1dacab8
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -0,0 +1,126 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_cex2a.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_CEX2A_H_
29#define _ZCRYPT_CEX2A_H_
30
31/**
32 * The type 50 message family is associated with a CEX2A card.
33 *
34 * The four members of the family are described below.
35 *
36 * Note that all unsigned char arrays are right-justified and left-padded
37 * with zeroes.
38 *
39 * Note that all reserved fields must be zeroes.
40 */
41struct type50_hdr {
42 unsigned char reserved1;
43 unsigned char msg_type_code; /* 0x50 */
44 unsigned short msg_len;
45 unsigned char reserved2;
46 unsigned char ignored;
47 unsigned short reserved3;
48} __attribute__((packed));
49
50#define TYPE50_TYPE_CODE 0x50
51
52#define TYPE50_MEB1_FMT 0x0001
53#define TYPE50_MEB2_FMT 0x0002
54#define TYPE50_CRB1_FMT 0x0011
55#define TYPE50_CRB2_FMT 0x0012
56
57/* Mod-Exp, with a small modulus */
58struct type50_meb1_msg {
59 struct type50_hdr header;
60 unsigned short keyblock_type; /* 0x0001 */
61 unsigned char reserved[6];
62 unsigned char exponent[128];
63 unsigned char modulus[128];
64 unsigned char message[128];
65} __attribute__((packed));
66
67/* Mod-Exp, with a large modulus */
68struct type50_meb2_msg {
69 struct type50_hdr header;
70 unsigned short keyblock_type; /* 0x0002 */
71 unsigned char reserved[6];
72 unsigned char exponent[256];
73 unsigned char modulus[256];
74 unsigned char message[256];
75} __attribute__((packed));
76
77/* CRT, with a small modulus */
78struct type50_crb1_msg {
79 struct type50_hdr header;
80 unsigned short keyblock_type; /* 0x0011 */
81 unsigned char reserved[6];
82 unsigned char p[64];
83 unsigned char q[64];
84 unsigned char dp[64];
85 unsigned char dq[64];
86 unsigned char u[64];
87 unsigned char message[128];
88} __attribute__((packed));
89
90/* CRT, with a large modulus */
91struct type50_crb2_msg {
92 struct type50_hdr header;
93 unsigned short keyblock_type; /* 0x0012 */
94 unsigned char reserved[6];
95 unsigned char p[128];
96 unsigned char q[128];
97 unsigned char dp[128];
98 unsigned char dq[128];
99 unsigned char u[128];
100 unsigned char message[256];
101} __attribute__((packed));
102
103/**
104 * The type 80 response family is associated with a CEX2A card.
105 *
106 * Note that all unsigned char arrays are right-justified and left-padded
107 * with zeroes.
108 *
109 * Note that all reserved fields must be zeroes.
110 */
111
112#define TYPE80_RSP_CODE 0x80
113
114struct type80_hdr {
115 unsigned char reserved1;
116 unsigned char type; /* 0x80 */
117 unsigned short len;
118 unsigned char code; /* 0x00 */
119 unsigned char reserved2[3];
120 unsigned char reserved3[8];
121} __attribute__((packed));
122
123int zcrypt_cex2a_init(void);
124void zcrypt_cex2a_exit(void);
125
126#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
new file mode 100644
index 000000000000..2cb616ba8bec
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -0,0 +1,133 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_error.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_ERROR_H_
29#define _ZCRYPT_ERROR_H_
30
31#include "zcrypt_api.h"
32
33/**
34 * Reply Messages
35 *
36 * Error reply messages are of two types:
37 * 82: Error (see below)
38 * 88: Error (see below)
39 * Both type 82 and type 88 have the same structure in the header.
40 *
41 * Request reply messages are of three known types:
42 * 80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
43 * 84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
44 * 86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
45 *
46 */
47struct error_hdr {
48 unsigned char reserved1; /* 0x00 */
49 unsigned char type; /* 0x82 or 0x88 */
50 unsigned char reserved2[2]; /* 0x0000 */
51 unsigned char reply_code; /* reply code */
52 unsigned char reserved3[3]; /* 0x000000 */
53};
54
55#define TYPE82_RSP_CODE 0x82
56#define TYPE88_RSP_CODE 0x88
57
58#define REP82_ERROR_MACHINE_FAILURE 0x10
59#define REP82_ERROR_PREEMPT_FAILURE 0x12
60#define REP82_ERROR_CHECKPT_FAILURE 0x14
61#define REP82_ERROR_MESSAGE_TYPE 0x20
62#define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */
63#define REP82_ERROR_INVALID_MSG_LEN 0x23
64#define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */
65#define REP82_ERROR_FORMAT_FIELD 0x29
66#define REP82_ERROR_INVALID_COMMAND 0x30
67#define REP82_ERROR_MALFORMED_MSG 0x40
68#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
69#define REP82_ERROR_WORD_ALIGNMENT 0x60
70#define REP82_ERROR_MESSAGE_LENGTH 0x80
71#define REP82_ERROR_OPERAND_INVALID 0x82
72#define REP82_ERROR_OPERAND_SIZE 0x84
73#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
74#define REP82_ERROR_RESERVED_FIELD 0x88
75#define REP82_ERROR_TRANSPORT_FAIL 0x90
76#define REP82_ERROR_PACKET_TRUNCATED 0xA0
77#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
78
79#define REP88_ERROR_MODULE_FAILURE 0x10
80
81#define REP88_ERROR_MESSAGE_TYPE 0x20
82#define REP88_ERROR_MESSAGE_MALFORMD 0x22
83#define REP88_ERROR_MESSAGE_LENGTH 0x23
84#define REP88_ERROR_RESERVED_FIELD 0x24
85#define REP88_ERROR_KEY_TYPE 0x34
86#define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */
87#define REP88_ERROR_OPERAND 0x84 /* CEX2A */
88#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */
89
90static inline int convert_error(struct zcrypt_device *zdev,
91 struct ap_message *reply)
92{
93 struct error_hdr *ehdr = reply->message;
94
95 PRINTK("Hardware error : Type %02x Message Header: %08x%08x\n",
96 ehdr->type, *(unsigned int *) reply->message,
97 *(unsigned int *) (reply->message + 4));
98
99 switch (ehdr->reply_code) {
100 case REP82_ERROR_OPERAND_INVALID:
101 case REP82_ERROR_OPERAND_SIZE:
102 case REP82_ERROR_EVEN_MOD_IN_OPND:
103 case REP88_ERROR_MESSAGE_MALFORMD:
104 // REP88_ERROR_INVALID_KEY // '82' CEX2A
105 // REP88_ERROR_OPERAND // '84' CEX2A
106 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
107 /* Invalid input data. */
108 return -EINVAL;
109 case REP82_ERROR_MESSAGE_TYPE:
110 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
111 /**
112 * Sending a message of the wrong type is a bug in the
113 * device driver. Warn about it, disable the device
114 * and then repeat the request.
115 */
116 WARN_ON(1);
117 zdev->online = 0;
118 return -EAGAIN;
119 case REP82_ERROR_TRANSPORT_FAIL:
120 case REP82_ERROR_MACHINE_FAILURE:
121 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
122 /* If a card fails, disable it and repeat the request. */
123 zdev->online = 0;
124 return -EAGAIN;
125 default:
126 PRINTKW("unknown type %02x reply code = %d\n",
127 ehdr->type, ehdr->reply_code);
128 zdev->online = 0;
129 return -EAGAIN; /* repeat the request on a different device. */
130 }
131}
132
133#endif /* _ZCRYPT_ERROR_H_ */
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
new file mode 100644
index 000000000000..2a9349ad68b7
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -0,0 +1,100 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_mono.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/miscdevice.h>
32#include <linux/fs.h>
33#include <linux/proc_fs.h>
34#include <linux/compat.h>
35#include <asm/atomic.h>
36#include <asm/uaccess.h>
37
38#include "ap_bus.h"
39#include "zcrypt_api.h"
40#include "zcrypt_pcica.h"
41#include "zcrypt_pcicc.h"
42#include "zcrypt_pcixcc.h"
43#include "zcrypt_cex2a.h"
44
45/**
46 * The module initialization code.
47 */
48int __init zcrypt_init(void)
49{
50 int rc;
51
52 rc = ap_module_init();
53 if (rc)
54 goto out;
55 rc = zcrypt_api_init();
56 if (rc)
57 goto out_ap;
58 rc = zcrypt_pcica_init();
59 if (rc)
60 goto out_api;
61 rc = zcrypt_pcicc_init();
62 if (rc)
63 goto out_pcica;
64 rc = zcrypt_pcixcc_init();
65 if (rc)
66 goto out_pcicc;
67 rc = zcrypt_cex2a_init();
68 if (rc)
69 goto out_pcixcc;
70 return 0;
71
72out_pcixcc:
73 zcrypt_pcixcc_exit();
74out_pcicc:
75 zcrypt_pcicc_exit();
76out_pcica:
77 zcrypt_pcica_exit();
78out_api:
79 zcrypt_api_exit();
80out_ap:
81 ap_module_exit();
82out:
83 return rc;
84}
85
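zcrypt_init() above uses the usual kernel pattern of unwinding with gotos: each failure label undoes exactly the initializations that already succeeded, in reverse order. A reduced standalone sketch of the same pattern, with made-up sub-init functions (illustrative only, not driver code):

#include <stdio.h>

static int a_init(void) { return 0; }		/* pretend this succeeds */
static void a_exit(void) { }
static int b_init(void) { return -1; }		/* pretend this fails */

static int demo_init(void)
{
	int rc;

	rc = a_init();
	if (rc)
		goto out;
	rc = b_init();
	if (rc)
		goto out_a;	/* undo only what already succeeded */
	return 0;
out_a:
	a_exit();
out:
	return rc;
}

int main(void)
{
	printf("demo_init() = %d\n", demo_init());	/* prints -1 */
	return 0;
}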
86/**
87 * The module termination code.
88 */
89void __exit zcrypt_exit(void)
90{
91 zcrypt_cex2a_exit();
92 zcrypt_pcixcc_exit();
93 zcrypt_pcicc_exit();
94 zcrypt_pcica_exit();
95 zcrypt_api_exit();
96 ap_module_exit();
97}
98
99module_init(zcrypt_init);
100module_exit(zcrypt_exit);
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
new file mode 100644
index 000000000000..b6a4ecdc8025
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -0,0 +1,418 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_pcica.h"
39
40#define PCICA_MIN_MOD_SIZE 1 /* 8 bits */
41#define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */
42
43#define PCICA_SPEED_RATING 2800
44
45#define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */
46#define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
47
48#define PCICA_CLEANUP_TIME (15*HZ)
49
50static struct ap_device_id zcrypt_pcica_ids[] = {
51 { AP_DEVICE(AP_DEVICE_TYPE_PCICA) },
52 { /* end of list */ },
53};
54
55#ifndef CONFIG_ZCRYPT_MONOLITHIC
56MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
57MODULE_AUTHOR("IBM Corporation");
58MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
59 "Copyright 2001, 2006 IBM Corporation");
60MODULE_LICENSE("GPL");
61#endif
62
63static int zcrypt_pcica_probe(struct ap_device *ap_dev);
64static void zcrypt_pcica_remove(struct ap_device *ap_dev);
65static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
66 struct ap_message *);
67
68static struct ap_driver zcrypt_pcica_driver = {
69 .probe = zcrypt_pcica_probe,
70 .remove = zcrypt_pcica_remove,
71 .receive = zcrypt_pcica_receive,
72 .ids = zcrypt_pcica_ids,
73};
74
75/**
76 * Convert an ICAMEX message to a type4 MEX message.
77 *
78 * @zdev: crypto device pointer
79 * @ap_msg: pointer to the AP message to be filled in
80 * @mex: pointer to user input data
81 *
82 * Returns 0 on success or -EFAULT.
83 */
84static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev,
85 struct ap_message *ap_msg,
86 struct ica_rsa_modexpo *mex)
87{
88 unsigned char *modulus, *exponent, *message;
89 int mod_len;
90
91 mod_len = mex->inputdatalength;
92
93 if (mod_len <= 128) {
94 struct type4_sme *sme = ap_msg->message;
95 memset(sme, 0, sizeof(*sme));
96 ap_msg->length = sizeof(*sme);
97 sme->header.msg_fmt = TYPE4_SME_FMT;
98 sme->header.msg_len = sizeof(*sme);
99 sme->header.msg_type_code = TYPE4_TYPE_CODE;
100 sme->header.request_code = TYPE4_REQU_CODE;
101 modulus = sme->modulus + sizeof(sme->modulus) - mod_len;
102 exponent = sme->exponent + sizeof(sme->exponent) - mod_len;
103 message = sme->message + sizeof(sme->message) - mod_len;
104 } else {
105 struct type4_lme *lme = ap_msg->message;
106 memset(lme, 0, sizeof(*lme));
107 ap_msg->length = sizeof(*lme);
108 lme->header.msg_fmt = TYPE4_LME_FMT;
109 lme->header.msg_len = sizeof(*lme);
110 lme->header.msg_type_code = TYPE4_TYPE_CODE;
111 lme->header.request_code = TYPE4_REQU_CODE;
112 modulus = lme->modulus + sizeof(lme->modulus) - mod_len;
113 exponent = lme->exponent + sizeof(lme->exponent) - mod_len;
114 message = lme->message + sizeof(lme->message) - mod_len;
115 }
116
117 if (copy_from_user(modulus, mex->n_modulus, mod_len) ||
118 copy_from_user(exponent, mex->b_key, mod_len) ||
119 copy_from_user(message, mex->inputdata, mod_len))
120 return -EFAULT;
121 return 0;
122}
123
124/**
125 * Convert an ICACRT message to a type4 CRT message.
126 *
127 * @zdev: crypto device pointer
128 * @ap_msg: pointer to the AP message to be filled in
129 * @crt: pointer to user input data
130 *
131 * Returns 0 on success or -EFAULT.
132 */
133static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
134 struct ap_message *ap_msg,
135 struct ica_rsa_modexpo_crt *crt)
136{
137 unsigned char *p, *q, *dp, *dq, *u, *inp;
138 int mod_len, short_len, long_len;
139
140 mod_len = crt->inputdatalength;
141 short_len = mod_len / 2;
142 long_len = mod_len / 2 + 8;
143
144 if (mod_len <= 128) {
145 struct type4_scr *scr = ap_msg->message;
146 memset(scr, 0, sizeof(*scr));
147 ap_msg->length = sizeof(*scr);
148 scr->header.msg_type_code = TYPE4_TYPE_CODE;
149 scr->header.request_code = TYPE4_REQU_CODE;
150 scr->header.msg_fmt = TYPE4_SCR_FMT;
151 scr->header.msg_len = sizeof(*scr);
152 p = scr->p + sizeof(scr->p) - long_len;
153 q = scr->q + sizeof(scr->q) - short_len;
154 dp = scr->dp + sizeof(scr->dp) - long_len;
155 dq = scr->dq + sizeof(scr->dq) - short_len;
156 u = scr->u + sizeof(scr->u) - long_len;
157 inp = scr->message + sizeof(scr->message) - mod_len;
158 } else {
159 struct type4_lcr *lcr = ap_msg->message;
160 memset(lcr, 0, sizeof(*lcr));
161 ap_msg->length = sizeof(*lcr);
162 lcr->header.msg_type_code = TYPE4_TYPE_CODE;
163 lcr->header.request_code = TYPE4_REQU_CODE;
164 lcr->header.msg_fmt = TYPE4_LCR_FMT;
165 lcr->header.msg_len = sizeof(*lcr);
166 p = lcr->p + sizeof(lcr->p) - long_len;
167 q = lcr->q + sizeof(lcr->q) - short_len;
168 dp = lcr->dp + sizeof(lcr->dp) - long_len;
169 dq = lcr->dq + sizeof(lcr->dq) - short_len;
170 u = lcr->u + sizeof(lcr->u) - long_len;
171 inp = lcr->message + sizeof(lcr->message) - mod_len;
172 }
173
174 if (copy_from_user(p, crt->np_prime, long_len) ||
175 copy_from_user(q, crt->nq_prime, short_len) ||
176 copy_from_user(dp, crt->bp_key, long_len) ||
177 copy_from_user(dq, crt->bq_key, short_len) ||
178 copy_from_user(u, crt->u_mult_inv, long_len) ||
179 copy_from_user(inp, crt->inputdata, mod_len))
180 return -EFAULT;
181 return 0;
182}
183
184/**
185 * Copy results from a type 84 reply message back to user space.
186 *
187 * @zdev: crypto device pointer
188 * @reply: reply AP message.
189 * @outputdata: pointer to user output data
190 * @outputdatalength: size of user output data
191 *
192 * Returns 0 on success, -EFAULT or -EAGAIN.
193 */
194static inline int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply,
196 char __user *outputdata,
197 unsigned int outputdatalength)
198{
199 struct type84_hdr *t84h = reply->message;
200 char *data;
201
202 if (t84h->len < sizeof(*t84h) + outputdatalength) {
203 /* The result is too short; the PCICA card should never do that. */
204 zdev->online = 0;
205 return -EAGAIN; /* repeat the request on a different device. */
206 }
207 BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
208 data = reply->message + t84h->len - outputdatalength;
209 if (copy_to_user(outputdata, data, outputdatalength))
210 return -EFAULT;
211 return 0;
212}
213
214static int convert_response(struct zcrypt_device *zdev,
215 struct ap_message *reply,
216 char __user *outputdata,
217 unsigned int outputdatalength)
218{
219 /* Response type byte is the second byte in the response. */
220 switch (((unsigned char *) reply->message)[1]) {
221 case TYPE82_RSP_CODE:
222 case TYPE88_RSP_CODE:
223 return convert_error(zdev, reply);
224 case TYPE84_RSP_CODE:
225 return convert_type84(zdev, reply,
226 outputdata, outputdatalength);
227 default: /* Unknown response type, this should NEVER EVER happen */
228 PRINTK("Unrecognized Message Header: %08x%08x\n",
229 *(unsigned int *) reply->message,
230 *(unsigned int *) (reply->message+4));
231 zdev->online = 0;
232 return -EAGAIN; /* repeat the request on a different device. */
233 }
234}
235
236/**
237 * This function is called from the AP bus code after a crypto request
238 * "msg" has finished with the reply message "reply".
239 * It is called from tasklet context.
240 * @ap_dev: pointer to the AP device
241 * @msg: pointer to the AP message
242 * @reply: pointer to the AP reply message
243 */
244static void zcrypt_pcica_receive(struct ap_device *ap_dev,
245 struct ap_message *msg,
246 struct ap_message *reply)
247{
248 static struct error_hdr error_reply = {
249 .type = TYPE82_RSP_CODE,
250 .reply_code = REP82_ERROR_MACHINE_FAILURE,
251 };
252 struct type84_hdr *t84h = reply->message;
253 int length;
254
255 /* Copy the reply message to the request message buffer. */
256 if (IS_ERR(reply))
257 memcpy(msg->message, &error_reply, sizeof(error_reply));
258 else if (t84h->code == TYPE84_RSP_CODE) {
259 length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len);
260 memcpy(msg->message, reply->message, length);
261 } else
262 memcpy(msg->message, reply->message, sizeof error_reply);
263 complete((struct completion *) msg->private);
264}
265
266static atomic_t zcrypt_step = ATOMIC_INIT(0);
267
268/**
269 * The request distributor calls this function if it picked the PCICA
270 * device to handle a modexpo request.
271 * @zdev: pointer to zcrypt_device structure that identifies the
272 * PCICA device to the request distributor
273 * @mex: pointer to the modexpo request buffer
274 */
275static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
276 struct ica_rsa_modexpo *mex)
277{
278 struct ap_message ap_msg;
279 struct completion work;
280 int rc;
281
282 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
283 if (!ap_msg.message)
284 return -ENOMEM;
285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
286 atomic_inc_return(&zcrypt_step);
287 ap_msg.private = &work;
288 rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex);
289 if (rc)
290 goto out_free;
291 init_completion(&work);
292 ap_queue_message(zdev->ap_dev, &ap_msg);
293 rc = wait_for_completion_interruptible_timeout(
294 &work, PCICA_CLEANUP_TIME);
295 if (rc > 0)
296 rc = convert_response(zdev, &ap_msg, mex->outputdata,
297 mex->outputdatalength);
298 else {
299 /* Signal pending or message timed out. */
300 ap_cancel_message(zdev->ap_dev, &ap_msg);
301 if (rc == 0)
302 /* Message timed out. */
303 rc = -ETIME;
304 }
305out_free:
306 kfree(ap_msg.message);
307 return rc;
308}
309
310/**
311 * The request distributor calls this function if it picked the PCICA
312 * device to handle a modexpo_crt request.
313 * @zdev: pointer to zcrypt_device structure that identifies the
314 * PCICA device to the request distributor
315 * @crt: pointer to the modexpo_crt request buffer
316 */
317static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
318 struct ica_rsa_modexpo_crt *crt)
319{
320 struct ap_message ap_msg;
321 struct completion work;
322 int rc;
323
324 ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
325 if (!ap_msg.message)
326 return -ENOMEM;
327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
328 atomic_inc_return(&zcrypt_step);
329 ap_msg.private = &work;
330 rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt);
331 if (rc)
332 goto out_free;
333 init_completion(&work);
334 ap_queue_message(zdev->ap_dev, &ap_msg);
335 rc = wait_for_completion_interruptible_timeout(
336 &work, PCICA_CLEANUP_TIME);
337 if (rc > 0)
338 rc = convert_response(zdev, &ap_msg, crt->outputdata,
339 crt->outputdatalength);
340 else {
341 /* Signal pending or message timed out. */
342 ap_cancel_message(zdev->ap_dev, &ap_msg);
343 if (rc == 0)
344 /* Message timed out. */
345 rc = -ETIME;
346 }
347out_free:
348 kfree(ap_msg.message);
349 return rc;
350}
351
352/**
353 * The crypto operations for a PCICA card.
354 */
355static struct zcrypt_ops zcrypt_pcica_ops = {
356 .rsa_modexpo = zcrypt_pcica_modexpo,
357 .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt,
358};
359
360/**
361 * Probe function for PCICA cards. It always accepts the AP device
362 * since the bus_match already checked the hardware type.
363 * @ap_dev: pointer to the AP device.
364 */
365static int zcrypt_pcica_probe(struct ap_device *ap_dev)
366{
367 struct zcrypt_device *zdev;
368 int rc;
369
370 zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE);
371 if (!zdev)
372 return -ENOMEM;
373 zdev->ap_dev = ap_dev;
374 zdev->ops = &zcrypt_pcica_ops;
375 zdev->online = 1;
376 zdev->user_space_type = ZCRYPT_PCICA;
377 zdev->type_string = "PCICA";
378 zdev->min_mod_size = PCICA_MIN_MOD_SIZE;
379 zdev->max_mod_size = PCICA_MAX_MOD_SIZE;
380 zdev->speed_rating = PCICA_SPEED_RATING;
381 ap_dev->reply = &zdev->reply;
382 ap_dev->private = zdev;
383 rc = zcrypt_device_register(zdev);
384 if (rc)
385 goto out_free;
386 return 0;
387
388out_free:
389 ap_dev->private = NULL;
390 zcrypt_device_free(zdev);
391 return rc;
392}
393
394/**
395 * This is called to remove the extended PCICA driver information
396 * when an AP device is removed.
397 */
398static void zcrypt_pcica_remove(struct ap_device *ap_dev)
399{
400 struct zcrypt_device *zdev = ap_dev->private;
401
402 zcrypt_device_unregister(zdev);
403}
404
405int __init zcrypt_pcica_init(void)
406{
407 return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica");
408}
409
410void zcrypt_pcica_exit(void)
411{
412 ap_driver_unregister(&zcrypt_pcica_driver);
413}
414
415#ifndef CONFIG_ZCRYPT_MONOLITHIC
416module_init(zcrypt_pcica_init);
417module_exit(zcrypt_pcica_exit);
418#endif
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
new file mode 100644
index 000000000000..3be11187f6df
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -0,0 +1,117 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCICA_H_
29#define _ZCRYPT_PCICA_H_
30
31/**
32 * The type 4 message family is associated with a PCICA card.
33 *
34 * The four members of the family are described below.
35 *
36 * Note that all unsigned char arrays are right-justified and left-padded
37 * with zeroes.
38 *
39 * Note that all reserved fields must be zeroes.
40 */
41struct type4_hdr {
42 unsigned char reserved1;
43 unsigned char msg_type_code; /* 0x04 */
44 unsigned short msg_len;
45 unsigned char request_code; /* 0x40 */
46 unsigned char msg_fmt;
47 unsigned short reserved2;
48} __attribute__((packed));
49
50#define TYPE4_TYPE_CODE 0x04
51#define TYPE4_REQU_CODE 0x40
52
53#define TYPE4_SME_FMT 0x00
54#define TYPE4_LME_FMT 0x10
55#define TYPE4_SCR_FMT 0x40
56#define TYPE4_LCR_FMT 0x50
57
58/* Mod-Exp, with a small modulus */
59struct type4_sme {
60 struct type4_hdr header;
61 unsigned char message[128];
62 unsigned char exponent[128];
63 unsigned char modulus[128];
64} __attribute__((packed));
65
66/* Mod-Exp, with a large modulus */
67struct type4_lme {
68 struct type4_hdr header;
69 unsigned char message[256];
70 unsigned char exponent[256];
71 unsigned char modulus[256];
72} __attribute__((packed));
73
74/* CRT, with a small modulus */
75struct type4_scr {
76 struct type4_hdr header;
77 unsigned char message[128];
78 unsigned char dp[72];
79 unsigned char dq[64];
80 unsigned char p[72];
81 unsigned char q[64];
82 unsigned char u[72];
83} __attribute__((packed));
84
85/* CRT, with a large modulus */
86struct type4_lcr {
87 struct type4_hdr header;
88 unsigned char message[256];
89 unsigned char dp[136];
90 unsigned char dq[128];
91 unsigned char p[136];
92 unsigned char q[128];
93 unsigned char u[136];
94} __attribute__((packed));
95
96/**
97 * The type 84 response family is associated with a PCICA card.
98 *
99 * Note that all unsigned char arrays are right-justified and left-padded
100 * with zeroes.
101 *
102 * Note that all reserved fields must be zeroes.
103 */
104
105struct type84_hdr {
106 unsigned char reserved1;
107 unsigned char code;
108 unsigned short len;
109 unsigned char reserved2[4];
110} __attribute__((packed));
111
112#define TYPE84_RSP_CODE 0x84
113
114int zcrypt_pcica_init(void);
115void zcrypt_pcica_exit(void);
116
117#endif /* _ZCRYPT_PCICA_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
new file mode 100644
index 000000000000..f295a403b29a
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -0,0 +1,630 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <asm/atomic.h>
33#include <asm/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_pcicc.h"
39#include "zcrypt_cca_key.h"
40
41#define PCICC_MIN_MOD_SIZE 64 /* 512 bits */
42#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
43#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
44
45/**
46 * PCICC cards need a speed rating of 0. This keeps them at the end of
47 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
48 * used if no other cards are present because they are slow and can only
49 * cope with PKCS12 padded requests. The logic is odd: PKCS11 padded
50 * requests are rejected. The modexpo function encrypts PKCS12 padded data
51 * and decrypts any non-PKCS12 padded data (except PKCS11) on the assumption
52 * that it is encrypted PKCS12 data. The modexpo_crt function always decrypts
53 * the data on the assumption that it is PKCS12 encrypted data.
54 */
55#define PCICC_SPEED_RATING 0
56
57#define PCICC_MAX_MESSAGE_SIZE 0x710 /* max size type6 v1 crt message */
58#define PCICC_MAX_RESPONSE_SIZE 0x710 /* max size type86 v1 reply */
59
60#define PCICC_CLEANUP_TIME (15*HZ)
61
62static struct ap_device_id zcrypt_pcicc_ids[] = {
63 { AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
64 { /* end of list */ },
65};
66
67#ifndef CONFIG_ZCRYPT_MONOLITHIC
68MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
69MODULE_AUTHOR("IBM Corporation");
70MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
71 "Copyright 2001, 2006 IBM Corporation");
72MODULE_LICENSE("GPL");
73#endif
74
75static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
76static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
77static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
78 struct ap_message *);
79
80static struct ap_driver zcrypt_pcicc_driver = {
81 .probe = zcrypt_pcicc_probe,
82 .remove = zcrypt_pcicc_remove,
83 .receive = zcrypt_pcicc_receive,
84 .ids = zcrypt_pcicc_ids,
85};
86
87/**
88 * The following is used to initialize the CPRB passed to the PCICC card
89 * in a type6 message. The 3 fields that must be filled in at execution
90 * time are req_parml, rpl_parml and usage_domain. Note that all three
91 * fields are *little*-endian. Actually, everything about this interface
92 * is ascii/little-endian, since the device has 'Intel inside'.
93 *
94 * The CPRB is followed immediately by the parm block.
95 * The parm block contains:
96 * - function code ('PD' 0x5044 or 'PK' 0x504B)
97 * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
98 * - VUD block
99 */
100static struct CPRB static_cprb = {
101 .cprb_len = __constant_cpu_to_le16(0x0070),
102 .cprb_ver_id = 0x41,
103 .func_id = {0x54,0x32},
104 .checkpoint_flag= 0x01,
105 .svr_namel = __constant_cpu_to_le16(0x0008),
106 .svr_name = {'I','C','S','F',' ',' ',' ',' '}
107};
108
109/**
110 * Check the message for PKCS11 padding.
111 */
112static inline int is_PKCS11_padded(unsigned char *buffer, int length)
113{
114 int i;
115 if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
116 return 0;
117 for (i = 2; i < length; i++)
118 if (buffer[i] != 0xFF)
119 break;
120 if (i < 10 || i == length)
121 return 0;
122 if (buffer[i] != 0x00)
123 return 0;
124 return 1;
125}
126
127/**
128 * Check the message for PKCS12 padding.
129 */
130static inline int is_PKCS12_padded(unsigned char *buffer, int length)
131{
132 int i;
133 if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
134 return 0;
135 for (i = 2; i < length; i++)
136 if (buffer[i] == 0x00)
137 break;
138 if ((i < 10) || (i == length))
139 return 0;
140 if (buffer[i] != 0x00)
141 return 0;
142 return 1;
143}
144
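Editorial aside: the two helpers above implement the usual PKCS #1 block-type
checks under the driver's historical "PKCS11"/"PKCS12" naming (block type 01 with
0xFF fill bytes versus block type 02 with arbitrary non-zero fill bytes, each
terminated by a 0x00 and at least 8 fill bytes long). A minimal userspace sketch,
not part of the driver and with an assumed payload, that builds a block-type-02
buffer the logic of is_PKCS12_padded() would accept:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[32] = { 0x00, 0x02 };	/* leading zero + block type 02 */
	int i;

	memset(buf + 2, 0xA5, 24);		/* 24 non-zero fill bytes (>= 8 required) */
	buf[26] = 0x00;				/* fill terminator */
	memcpy(buf + 27, "hello", 5);		/* the actual payload */

	/* Same scan as is_PKCS12_padded(): find the first 0x00 after the type byte. */
	for (i = 2; i < (int) sizeof(buf) && buf[i] != 0x00; i++)
		;
	printf("accepted: %s\n",
	       (i >= 10 && i < (int) sizeof(buf)) ? "yes" : "no");
	return 0;
}
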
145/**
146 * Convert an ICAMEX message to a type6 MEX message.
147 *
148 * @zdev: crypto device pointer
149 * @ap_msg: pointer to AP message
150 * @mex: pointer to user input data
151 *
152 * Returns 0 on success or a negative error code.
153 */
154static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
155 struct ap_message *ap_msg,
156 struct ica_rsa_modexpo *mex)
157{
158 static struct type6_hdr static_type6_hdr = {
159 .type = 0x06,
160 .offset1 = 0x00000058,
161 .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
162 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
163 .function_code = {'P','K'},
164 };
165 static struct function_and_rules_block static_pke_function_and_rules ={
166 .function_code = {'P','K'},
167 .ulen = __constant_cpu_to_le16(10),
168 .only_rule = {'P','K','C','S','-','1','.','2'}
169 };
170 struct {
171 struct type6_hdr hdr;
172 struct CPRB cprb;
173 struct function_and_rules_block fr;
174 unsigned short length;
175 char text[0];
176 } __attribute__((packed)) *msg = ap_msg->message;
177 int vud_len, pad_len, size;
178
179 /* VUD.ciphertext */
180 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
181 return -EFAULT;
182
183 if (is_PKCS11_padded(msg->text, mex->inputdatalength))
184 return -EINVAL;
185
186 /* static message header and f&r */
187 msg->hdr = static_type6_hdr;
188 msg->fr = static_pke_function_and_rules;
189
190 if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
191 /* strip the padding and adjust the data length */
192 pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
193 if (pad_len <= 9 || pad_len >= mex->inputdatalength)
194 return -ENODEV;
195 vud_len = mex->inputdatalength - pad_len;
196 memmove(msg->text, msg->text + pad_len, vud_len);
197 msg->length = cpu_to_le16(vud_len + 2);
198
199 /* Set up key after the variable length text. */
200 size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
201 if (size < 0)
202 return size;
203 size += sizeof(*msg) + vud_len; /* total size of msg */
204 } else {
205 vud_len = mex->inputdatalength;
206 msg->length = cpu_to_le16(2 + vud_len);
207
208 msg->hdr.function_code[1] = 'D';
209 msg->fr.function_code[1] = 'D';
210
211 /* Set up key after the variable length text. */
212 size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
213 if (size < 0)
214 return size;
215 size += sizeof(*msg) + vud_len; /* total size of msg */
216 }
217
218 /* message header, cprb and f&r */
219 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
220 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
221
222 msg->cprb = static_cprb;
223 msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
224 msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
225 sizeof(msg->cprb));
226 msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
227
228 ap_msg->length = (size + 3) & -4;
229 return 0;
230}
231
232/**
233 * Convert an ICACRT message to a type6 CRT message.
234 *
235 * @zdev: crypto device pointer
236 * @ap_msg: pointer to AP message
237 * @crt: pointer to user input data
238 *
239 * Returns 0 on success or a negative error code.
240 */
241static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
242 struct ap_message *ap_msg,
243 struct ica_rsa_modexpo_crt *crt)
244{
245 static struct type6_hdr static_type6_hdr = {
246 .type = 0x06,
247 .offset1 = 0x00000058,
248 .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
249 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
250 .function_code = {'P','D'},
251 };
252 static struct function_and_rules_block static_pkd_function_and_rules ={
253 .function_code = {'P','D'},
254 .ulen = __constant_cpu_to_le16(10),
255 .only_rule = {'P','K','C','S','-','1','.','2'}
256 };
257 struct {
258 struct type6_hdr hdr;
259 struct CPRB cprb;
260 struct function_and_rules_block fr;
261 unsigned short length;
262 char text[0];
263 } __attribute__((packed)) *msg = ap_msg->message;
264 int size;
265
266 /* VUD.ciphertext */
267 msg->length = cpu_to_le16(2 + crt->inputdatalength);
268 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
269 return -EFAULT;
270
271 if (is_PKCS11_padded(msg->text, crt->inputdatalength))
272 return -EINVAL;
273
274 /* Set up key after the variable length text. */
275 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
276 if (size < 0)
277 return size;
278 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
279
280 /* message header, cprb and f&r */
281 msg->hdr = static_type6_hdr;
282 msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
283 msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
284
285 msg->cprb = static_cprb;
286 msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
287 msg->cprb.req_parml = msg->cprb.rpl_parml =
288 cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));
289
290 msg->fr = static_pkd_function_and_rules;
291
292 ap_msg->length = (size + 3) & -4;
293 return 0;
294}
295
296/**
297 * Copy results from a type 86 reply message back to user space.
298 *
299 * @zdev: crypto device pointer
300 * @reply: reply AP message.
301 * @data: pointer to user output data
302 * @length: size of user output data
303 *
304 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
305 */
306struct type86_reply {
307 struct type86_hdr hdr;
308 struct type86_fmt2_ext fmt2;
309 struct CPRB cprb;
310 unsigned char pad[4]; /* 4 byte function code/rules block ? */
311 unsigned short length;
312 char text[0];
313} __attribute__((packed));
314
315static int convert_type86(struct zcrypt_device *zdev,
316 struct ap_message *reply,
317 char __user *outputdata,
318 unsigned int outputdatalength)
319{
320 static unsigned char static_pad[] = {
321 0x00,0x02,
322 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
323 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
324 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
325 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
326 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
327 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
328 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
329 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
330 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
331 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
332 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
333 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
334 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
335 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
336 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
337 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
338 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
339 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
340 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
341 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
342 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
343 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
344 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
345 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
346 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
347 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
348 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
349 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
350 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
351 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
352 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
353 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
354 };
355 struct type86_reply *msg = reply->message;
356 unsigned short service_rc, service_rs;
357 unsigned int reply_len, pad_len;
358 char *data;
359
360 service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
361 if (unlikely(service_rc != 0)) {
362 service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
363 if (service_rc == 8 && service_rs == 66) {
364 PDEBUG("Bad block format on PCICC\n");
365 return -EINVAL;
366 }
367 if (service_rc == 8 && service_rs == 65) {
368 PDEBUG("Probably an even modulus on PCICC\n");
369 return -EINVAL;
370 }
371 if (service_rc == 8 && service_rs == 770) {
372 PDEBUG("Invalid key length on PCICC\n");
373 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
374 return -EAGAIN;
375 }
376 if (service_rc == 8 && service_rs == 783) {
377 PDEBUG("Extended bitlengths not enabled on PCICC\n");
378 zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
379 return -EAGAIN;
380 }
381 PRINTK("Unknown service rc/rs (PCICC): %d/%d\n",
382 service_rc, service_rs);
383 zdev->online = 0;
384 return -EAGAIN; /* repeat the request on a different device. */
385 }
386 data = msg->text;
387 reply_len = le16_to_cpu(msg->length) - 2;
388 if (reply_len > outputdatalength)
389 return -EINVAL;
390 /**
391 * For all encipher requests, the length of the ciphertext (reply_len)
392 * will always equal the modulus length. For MEX decipher requests
393 * the output needs to get padded. Minimum pad size is 10.
394 *
395	 * Currently, the cases where padding will be added are:
396 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
397 * ZERO-PAD and CRT is only supported for PKD requests)
398 * - PCICC, always
399 */
400 pad_len = outputdatalength - reply_len;
401 if (pad_len > 0) {
402 if (pad_len < 10)
403 return -EINVAL;
404 /* 'restore' padding left in the PCICC/PCIXCC card. */
405 if (copy_to_user(outputdata, static_pad, pad_len - 1))
406 return -EFAULT;
407 if (put_user(0, outputdata + pad_len - 1))
408 return -EFAULT;
409 }
410 /* Copy the crypto response to user space. */
411 if (copy_to_user(outputdata + pad_len, data, reply_len))
412 return -EFAULT;
413 return 0;
414}
415
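Worked example for the padding restore above (numbers assumed for illustration):
with a 2048-bit key the modulus, and therefore outputdatalength, is 256 bytes. If
the card returns reply_len = 219 bytes of cleartext, then pad_len = 256 - 219 = 37,
so the code copies 36 bytes from static_pad, writes a single 0x00 terminator at
offset 36 and places the 219 data bytes at offset 37, rebuilding a full 256-byte
block-type-02 buffer for the caller.
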
416static int convert_response(struct zcrypt_device *zdev,
417 struct ap_message *reply,
418 char __user *outputdata,
419 unsigned int outputdatalength)
420{
421 struct type86_reply *msg = reply->message;
422
423 /* Response type byte is the second byte in the response. */
424 switch (msg->hdr.type) {
425 case TYPE82_RSP_CODE:
426 case TYPE88_RSP_CODE:
427 return convert_error(zdev, reply);
428 case TYPE86_RSP_CODE:
429 if (msg->hdr.reply_code)
430 return convert_error(zdev, reply);
431 if (msg->cprb.cprb_ver_id == 0x01)
432 return convert_type86(zdev, reply,
433 outputdata, outputdatalength);
434 /* no break, incorrect cprb version is an unknown response */
435 default: /* Unknown response type, this should NEVER EVER happen */
436 PRINTK("Unrecognized Message Header: %08x%08x\n",
437 *(unsigned int *) reply->message,
438 *(unsigned int *) (reply->message+4));
439 zdev->online = 0;
440 return -EAGAIN; /* repeat the request on a different device. */
441 }
442}
443
444/**
445 * This function is called from the AP bus code after a crypto request
446 * "msg" has finished with the reply message "reply".
447 * It is called from tasklet context.
448 * @ap_dev: pointer to the AP device
449 * @msg: pointer to the AP message
450 * @reply: pointer to the AP reply message
451 */
452static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
453 struct ap_message *msg,
454 struct ap_message *reply)
455{
456 static struct error_hdr error_reply = {
457 .type = TYPE82_RSP_CODE,
458 .reply_code = REP82_ERROR_MACHINE_FAILURE,
459 };
460 struct type86_reply *t86r = reply->message;
461 int length;
462
463 /* Copy the reply message to the request message buffer. */
464 if (IS_ERR(reply))
465 memcpy(msg->message, &error_reply, sizeof(error_reply));
466 else if (t86r->hdr.type == TYPE86_RSP_CODE &&
467 t86r->cprb.cprb_ver_id == 0x01) {
468 length = sizeof(struct type86_reply) + t86r->length - 2;
469 length = min(PCICC_MAX_RESPONSE_SIZE, length);
470 memcpy(msg->message, reply->message, length);
471 } else
472 memcpy(msg->message, reply->message, sizeof error_reply);
473 complete((struct completion *) msg->private);
474}
475
476static atomic_t zcrypt_step = ATOMIC_INIT(0);
477
478/**
479 * The request distributor calls this function if it picked the PCICC
480 * device to handle a modexpo request.
481 * @zdev: pointer to zcrypt_device structure that identifies the
482 * PCICC device to the request distributor
483 * @mex: pointer to the modexpo request buffer
484 */
485static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
486 struct ica_rsa_modexpo *mex)
487{
488 struct ap_message ap_msg;
489 struct completion work;
490 int rc;
491
492 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
493 if (!ap_msg.message)
494 return -ENOMEM;
495 ap_msg.length = PAGE_SIZE;
496 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
497 atomic_inc_return(&zcrypt_step);
498 ap_msg.private = &work;
499 rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
500 if (rc)
501 goto out_free;
502 init_completion(&work);
503 ap_queue_message(zdev->ap_dev, &ap_msg);
504 rc = wait_for_completion_interruptible_timeout(
505 &work, PCICC_CLEANUP_TIME);
506 if (rc > 0)
507 rc = convert_response(zdev, &ap_msg, mex->outputdata,
508 mex->outputdatalength);
509 else {
510 /* Signal pending or message timed out. */
511 ap_cancel_message(zdev->ap_dev, &ap_msg);
512 if (rc == 0)
513 /* Message timed out. */
514 rc = -ETIME;
515 }
516out_free:
517 free_page((unsigned long) ap_msg.message);
518 return rc;
519}
520
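A short worked example for the psmid built above (values assumed): the id packs
the caller's pid into the upper 32 bits and the zcrypt_step counter into the
lower 32 bits, so a reply coming back from the AP queue can be matched to the
request that produced it. For pid 1234 (0x4D2) and a counter value of 7 this
gives (1234ULL << 32) + 7 = 0x000004D200000007.
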
521/**
522 * The request distributor calls this function if it picked the PCICC
523 * device to handle a modexpo_crt request.
524 * @zdev: pointer to zcrypt_device structure that identifies the
525 * PCICC device to the request distributor
526 * @crt: pointer to the modexpo_crt request buffer
527 */
528static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
529 struct ica_rsa_modexpo_crt *crt)
530{
531 struct ap_message ap_msg;
532 struct completion work;
533 int rc;
534
535 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
536 if (!ap_msg.message)
537 return -ENOMEM;
538 ap_msg.length = PAGE_SIZE;
539 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
540 atomic_inc_return(&zcrypt_step);
541 ap_msg.private = &work;
542 rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
543 if (rc)
544 goto out_free;
545 init_completion(&work);
546 ap_queue_message(zdev->ap_dev, &ap_msg);
547 rc = wait_for_completion_interruptible_timeout(
548 &work, PCICC_CLEANUP_TIME);
549 if (rc > 0)
550 rc = convert_response(zdev, &ap_msg, crt->outputdata,
551 crt->outputdatalength);
552 else {
553 /* Signal pending or message timed out. */
554 ap_cancel_message(zdev->ap_dev, &ap_msg);
555 if (rc == 0)
556 /* Message timed out. */
557 rc = -ETIME;
558 }
559out_free:
560 free_page((unsigned long) ap_msg.message);
561 return rc;
562}
563
564/**
565 * The crypto operations for a PCICC card.
566 */
567static struct zcrypt_ops zcrypt_pcicc_ops = {
568 .rsa_modexpo = zcrypt_pcicc_modexpo,
569 .rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
570};
571
572/**
573 * Probe function for PCICC cards. It always accepts the AP device
574 * since the bus_match already checked the hardware type.
575 * @ap_dev: pointer to the AP device.
576 */
577static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
578{
579 struct zcrypt_device *zdev;
580 int rc;
581
582 zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
583 if (!zdev)
584 return -ENOMEM;
585 zdev->ap_dev = ap_dev;
586 zdev->ops = &zcrypt_pcicc_ops;
587 zdev->online = 1;
588 zdev->user_space_type = ZCRYPT_PCICC;
589 zdev->type_string = "PCICC";
590 zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
591 zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
592 zdev->speed_rating = PCICC_SPEED_RATING;
593 ap_dev->reply = &zdev->reply;
594 ap_dev->private = zdev;
595 rc = zcrypt_device_register(zdev);
596 if (rc)
597 goto out_free;
598 return 0;
599
600 out_free:
601 ap_dev->private = NULL;
602 zcrypt_device_free(zdev);
603 return rc;
604}
605
606/**
607 * This is called to remove the extended PCICC driver information
608 * if an AP device is removed.
609 */
610static void zcrypt_pcicc_remove(struct ap_device *ap_dev)
611{
612 struct zcrypt_device *zdev = ap_dev->private;
613
614 zcrypt_device_unregister(zdev);
615}
616
617int __init zcrypt_pcicc_init(void)
618{
619 return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc");
620}
621
622void zcrypt_pcicc_exit(void)
623{
624 ap_driver_unregister(&zcrypt_pcicc_driver);
625}
626
627#ifndef CONFIG_ZCRYPT_MONOLITHIC
628module_init(zcrypt_pcicc_init);
629module_exit(zcrypt_pcicc_exit);
630#endif
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
new file mode 100644
index 000000000000..6d4454846c8f
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -0,0 +1,176 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCICC_H_
29#define _ZCRYPT_PCICC_H_
30
31/**
32 * The type 6 message family is associated with PCICC or PCIXCC cards.
33 *
34 * It contains a message header followed by a CPRB, both of which
35 * are described below.
36 *
37 * Note that all reserved fields must be zeroes.
38 */
39struct type6_hdr {
40 unsigned char reserved1; /* 0x00 */
41 unsigned char type; /* 0x06 */
42 unsigned char reserved2[2]; /* 0x0000 */
43 unsigned char right[4]; /* 0x00000000 */
44 unsigned char reserved3[2]; /* 0x0000 */
45 unsigned char reserved4[2]; /* 0x0000 */
46 unsigned char apfs[4]; /* 0x00000000 */
47 unsigned int offset1; /* 0x00000058 (offset to CPRB) */
48 unsigned int offset2; /* 0x00000000 */
49 unsigned int offset3; /* 0x00000000 */
50 unsigned int offset4; /* 0x00000000 */
51 unsigned char agent_id[16]; /* PCICC: */
52 /* 0x0100 */
53 /* 0x4343412d4150504c202020 */
54 /* 0x010101 */
55 /* PCIXCC: */
56 /* 0x4341000000000000 */
57 /* 0x0000000000000000 */
58 unsigned char rqid[2]; /* rqid. internal to 603 */
59 unsigned char reserved5[2]; /* 0x0000 */
60 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
61 unsigned char reserved6[2]; /* 0x0000 */
62 unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
63 unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
64 unsigned int ToCardLen3; /* 0x00000000 */
65 unsigned int ToCardLen4; /* 0x00000000 */
66 unsigned int FromCardLen1; /* response buffer length */
67 unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
68 unsigned int FromCardLen3; /* 0x00000000 */
69 unsigned int FromCardLen4; /* 0x00000000 */
70} __attribute__((packed));
71
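A quick consistency check on the layout above: the packed header is 16 bytes of
reserved/type/right/apfs fields, 16 bytes of offsets, a 16-byte agent_id, 8 bytes
of rqid/function_code/reserved fields and 32 bytes of To/FromCardLen fields, 88
bytes in total. 88 equals 0x58, which is exactly the offset1 value the drivers
use to point at the CPRB that immediately follows this header.
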
72/**
73 * CPRB
74 * Note that all shorts, ints and longs are little-endian.
75 * All pointer fields are 32 bits long and carry no meaning (the buffers simply follow the CPRB).
76 *
77 * A request CPRB is followed by a request_parameter_block.
78 *
79 * The request (or reply) parameter block is organized thus:
80 * function code
81 * VUD block
82 * key block
83 */
84struct CPRB {
85 unsigned short cprb_len; /* CPRB length */
86 unsigned char cprb_ver_id; /* CPRB version id. */
87 unsigned char pad_000; /* Alignment pad byte. */
88 unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
89 unsigned char srpi_verb; /* SRPI verb type */
90 unsigned char flags; /* flags */
91 unsigned char func_id[2]; /* function id */
92 unsigned char checkpoint_flag; /* */
93 unsigned char resv2; /* reserved */
94 unsigned short req_parml; /* request parameter buffer */
95 /* length 16-bit little endian */
96 unsigned char req_parmp[4]; /* request parameter buffer *
97 * pointer (means nothing: the *
98 * parameter buffer follows *
99 * the CPRB). */
100 unsigned char req_datal[4]; /* request data buffer */
101 /* length ULELONG */
102 unsigned char req_datap[4]; /* request data buffer */
103 /* pointer */
104 unsigned short rpl_parml; /* reply parameter buffer */
105 /* length 16-bit little endian */
106 unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
107 unsigned char rpl_parmp[4]; /* reply parameter buffer *
108 * pointer (means nothing: the *
109 * parameter buffer follows *
110 * the CPRB). */
111 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
112 unsigned char rpl_datap[4]; /* reply data buffer */
113 /* pointer */
114 unsigned short ccp_rscode; /* server reason code ULESHORT */
115 unsigned short ccp_rtcode; /* server return code ULESHORT */
116 unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
117 unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
118 unsigned char repd_datal[4]; /* replied data length ULELONG */
119 unsigned char req_pc[2]; /* PC identifier */
120 unsigned char res_origin[8]; /* resource origin */
121 unsigned char mac_value[8]; /* Mac Value */
122 unsigned char logon_id[8]; /* Logon Identifier */
123 unsigned char usage_domain[2]; /* cdx */
124 unsigned char resv3[18]; /* reserved for requestor */
125 unsigned short svr_namel; /* server name length ULESHORT */
126 unsigned char svr_name[8]; /* server name */
127} __attribute__((packed));
128
129/**
130 * The type 86 message family is associated with PCICC and PCIXCC cards.
131 *
132 * It contains a message header followed by a CPRB. The CPRB is
133 * the same as the request CPRB, which is described above.
134 *
135 * If format is 1, an error condition exists and no data beyond
136 * the 8-byte message header is of interest.
137 *
138 * The non-error message is shown below.
139 *
140 * Note that all reserved fields must be zeroes.
141 */
142struct type86_hdr {
143 unsigned char reserved1; /* 0x00 */
144 unsigned char type; /* 0x86 */
145 unsigned char format; /* 0x01 (error) or 0x02 (ok) */
146 unsigned char reserved2; /* 0x00 */
147 unsigned char reply_code; /* reply code (see above) */
148 unsigned char reserved3[3]; /* 0x000000 */
149} __attribute__((packed));
150
151#define TYPE86_RSP_CODE 0x86
152#define TYPE86_FMT2 0x02
153
154struct type86_fmt2_ext {
155 unsigned char reserved[4]; /* 0x00000000 */
156 unsigned char apfs[4]; /* final status */
157 unsigned int count1; /* length of CPRB + parameters */
158 unsigned int offset1; /* offset to CPRB */
159 unsigned int count2; /* 0x00000000 */
160 unsigned int offset2; /* db offset 0x00000000 for PKD */
161 unsigned int count3; /* 0x00000000 */
162 unsigned int offset3; /* 0x00000000 */
163 unsigned int count4; /* 0x00000000 */
164 unsigned int offset4; /* 0x00000000 */
165} __attribute__((packed));
166
167struct function_and_rules_block {
168 unsigned char function_code[2];
169 unsigned short ulen;
170 unsigned char only_rule[8];
171} __attribute__((packed));
172
173int zcrypt_pcicc_init(void);
174void zcrypt_pcicc_exit(void);
175
176#endif /* _ZCRYPT_PCICC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
new file mode 100644
index 000000000000..2da8b9381407
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -0,0 +1,951 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.c
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <linux/delay.h>
33#include <asm/atomic.h>
34#include <asm/uaccess.h>
35
36#include "ap_bus.h"
37#include "zcrypt_api.h"
38#include "zcrypt_error.h"
39#include "zcrypt_pcicc.h"
40#include "zcrypt_pcixcc.h"
41#include "zcrypt_cca_key.h"
42
43#define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */
44#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
45#define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */
46
47#define PCIXCC_MCL2_SPEED_RATING 7870 /* FIXME: needs finetuning */
48#define PCIXCC_MCL3_SPEED_RATING 7870
49#define CEX2C_SPEED_RATING 8540
50
51#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */
52#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
53
54#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
55#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
56#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
57#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
58
59#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
60
61#define PCIXCC_CLEANUP_TIME (15*HZ)
62
63#define CEIL4(x) ((((x)+3)/4)*4)
64
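CEIL4() rounds its argument up to the next multiple of 4, for example
CEIL4(5) = ((5+3)/4)*4 = 8 and CEIL4(8) = ((8+3)/4)*4 = 8; it is the same rounding
that zcrypt_pcicc.c expresses as (size + 3) & -4.
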
65struct response_type {
66 struct completion work;
67 int type;
68};
69#define PCIXCC_RESPONSE_TYPE_ICA 0
70#define PCIXCC_RESPONSE_TYPE_XCRB 1
71
72static struct ap_device_id zcrypt_pcixcc_ids[] = {
73 { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
74 { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
75 { /* end of list */ },
76};
77
78#ifndef CONFIG_ZCRYPT_MONOLITHIC
79MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
80MODULE_AUTHOR("IBM Corporation");
81MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
82 "Copyright 2001, 2006 IBM Corporation");
83MODULE_LICENSE("GPL");
84#endif
85
86static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
87static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
88static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
89 struct ap_message *);
90
91static struct ap_driver zcrypt_pcixcc_driver = {
92 .probe = zcrypt_pcixcc_probe,
93 .remove = zcrypt_pcixcc_remove,
94 .receive = zcrypt_pcixcc_receive,
95 .ids = zcrypt_pcixcc_ids,
96};
97
98/**
99 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
100 * card in a type6 message. The 3 fields that must be filled in at execution
101 * time are req_parml, rpl_parml and usage_domain.
102 * Everything about this interface is ascii/big-endian, since the
103 * device does *not* have 'Intel inside'.
104 *
105 * The CPRBX is followed immediately by the parm block.
106 * The parm block contains:
107 * - function code ('PD' 0x5044 or 'PK' 0x504B)
108 * - rule block (one of:)
109 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
110 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
111 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
112 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
113 * - VUD block
114 */
115static struct CPRBX static_cprbx = {
116 .cprb_len = 0x00DC,
117 .cprb_ver_id = 0x02,
118 .func_id = {0x54,0x32},
119};
120
121/**
122 * Convert a ICAMEX message to a type6 MEX message.
123 *
124 * @zdev: crypto device pointer
125 * @ap_msg: pointer to AP message
126 * @mex: pointer to user input data
127 *
128 * Returns 0 on success or -EFAULT.
129 */
130static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
131 struct ap_message *ap_msg,
132 struct ica_rsa_modexpo *mex)
133{
134 static struct type6_hdr static_type6_hdrX = {
135 .type = 0x06,
136 .offset1 = 0x00000058,
137 .agent_id = {'C','A',},
138 .function_code = {'P','K'},
139 };
140 static struct function_and_rules_block static_pke_fnr = {
141 .function_code = {'P','K'},
142 .ulen = 10,
143 .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
144 };
145 static struct function_and_rules_block static_pke_fnr_MCL2 = {
146 .function_code = {'P','K'},
147 .ulen = 10,
148 .only_rule = {'Z','E','R','O','-','P','A','D'}
149 };
150 struct {
151 struct type6_hdr hdr;
152 struct CPRBX cprbx;
153 struct function_and_rules_block fr;
154 unsigned short length;
155 char text[0];
156 } __attribute__((packed)) *msg = ap_msg->message;
157 int size;
158
159 /* VUD.ciphertext */
160 msg->length = mex->inputdatalength + 2;
161 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
162 return -EFAULT;
163
164 /* Set up key which is located after the variable length text. */
165 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
166 if (size < 0)
167 return size;
168 size += sizeof(*msg) + mex->inputdatalength;
169
170 /* message header, cprbx and f&r */
171 msg->hdr = static_type6_hdrX;
172 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
173 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
174
175 msg->cprbx = static_cprbx;
176 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
177 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
178
179 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
180 static_pke_fnr_MCL2 : static_pke_fnr;
181
182 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
183
184 ap_msg->length = size;
185 return 0;
186}
187
188/**
189 * Convert a ICACRT message to a type6 CRT message.
190 *
191 * @zdev: crypto device pointer
192 * @ap_msg: pointer to AP message
193 * @crt: pointer to user input data
194 *
195 * Returns 0 on success or -EFAULT.
196 */
197static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
198 struct ap_message *ap_msg,
199 struct ica_rsa_modexpo_crt *crt)
200{
201 static struct type6_hdr static_type6_hdrX = {
202 .type = 0x06,
203 .offset1 = 0x00000058,
204 .agent_id = {'C','A',},
205 .function_code = {'P','D'},
206 };
207 static struct function_and_rules_block static_pkd_fnr = {
208 .function_code = {'P','D'},
209 .ulen = 10,
210 .only_rule = {'Z','E','R','O','-','P','A','D'}
211 };
212
213 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
214 .function_code = {'P','D'},
215 .ulen = 10,
216 .only_rule = {'P','K','C','S','-','1','.','2'}
217 };
218 struct {
219 struct type6_hdr hdr;
220 struct CPRBX cprbx;
221 struct function_and_rules_block fr;
222 unsigned short length;
223 char text[0];
224 } __attribute__((packed)) *msg = ap_msg->message;
225 int size;
226
227 /* VUD.ciphertext */
228 msg->length = crt->inputdatalength + 2;
229 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
230 return -EFAULT;
231
232 /* Set up key which is located after the variable length text. */
233 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
234 if (size < 0)
235 return size;
236 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
237
238 /* message header, cprbx and f&r */
239 msg->hdr = static_type6_hdrX;
240 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
241 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
242
243 msg->cprbx = static_cprbx;
244 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
245 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
246 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
247
248 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
249 static_pkd_fnr_MCL2 : static_pkd_fnr;
250
251 ap_msg->length = size;
252 return 0;
253}
254
255/**
256 * Convert a XCRB message to a type6 CPRB message.
257 *
258 * @zdev: crypto device pointer
259 * @ap_msg: pointer to AP message
260 * @xcRB: pointer to user input data
261 *
262 * Returns 0 on success or -EFAULT.
263 */
264struct type86_fmt2_msg {
265 struct type86_hdr hdr;
266 struct type86_fmt2_ext fmt2;
267} __attribute__((packed));
268
269static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
270 struct ap_message *ap_msg,
271 struct ica_xcRB *xcRB)
272{
273 static struct type6_hdr static_type6_hdrX = {
274 .type = 0x06,
275 .offset1 = 0x00000058,
276 };
277 struct {
278 struct type6_hdr hdr;
279 struct ica_CPRBX cprbx;
280 } __attribute__((packed)) *msg = ap_msg->message;
281
282 int rcblen = CEIL4(xcRB->request_control_blk_length);
283 int replylen;
284 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
285 char *function_code;
286
287 /* length checks */
288 ap_msg->length = sizeof(struct type6_hdr) +
289 CEIL4(xcRB->request_control_blk_length) +
290 xcRB->request_data_length;
291 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) {
292 PRINTK("Combined message is too large (%ld/%d/%d).\n",
293 sizeof(struct type6_hdr),
294 xcRB->request_control_blk_length,
295 xcRB->request_data_length);
296 return -EFAULT;
297 }
298 if (CEIL4(xcRB->reply_control_blk_length) >
299 PCIXCC_MAX_XCRB_REPLY_SIZE) {
300 PDEBUG("Reply CPRB length is too large (%d).\n",
301 xcRB->request_control_blk_length);
302 return -EFAULT;
303 }
304 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) {
305 PDEBUG("Reply data block length is too large (%d).\n",
306 xcRB->reply_data_length);
307 return -EFAULT;
308 }
309 replylen = CEIL4(xcRB->reply_control_blk_length) +
310 CEIL4(xcRB->reply_data_length) +
311 sizeof(struct type86_fmt2_msg);
312 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
313 PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE"
314 " (%d/%d/%d).\n",
315 sizeof(struct type86_fmt2_msg),
316 xcRB->reply_control_blk_length,
317 xcRB->reply_data_length);
318 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
319 (sizeof(struct type86_fmt2_msg) +
320 CEIL4(xcRB->reply_data_length));
321 PDEBUG("Capping Reply CPRB length at %d\n",
322 xcRB->reply_control_blk_length);
323 }
324
325 /* prepare type6 header */
326 msg->hdr = static_type6_hdrX;
327 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
328 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
329 if (xcRB->request_data_length) {
330 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
331 msg->hdr.ToCardLen2 = xcRB->request_data_length;
332 }
333 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
334 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
335
336 /* prepare CPRB */
337 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
338 xcRB->request_control_blk_length))
339 return -EFAULT;
340 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
341 xcRB->request_control_blk_length) {
342 PDEBUG("cprb_len too large (%d/%d)\n", msg->cprbx.cprb_len,
343 xcRB->request_control_blk_length);
344 return -EFAULT;
345 }
346 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
347 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
348
349 /* copy data block */
350 if (xcRB->request_data_length &&
351 copy_from_user(req_data, xcRB->request_data_address,
352 xcRB->request_data_length))
353 return -EFAULT;
354 return 0;
355}
356
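Worked example for the length checks above (request sizes assumed): an XCRB with a
250-byte request control block and 1000 bytes of request data yields
ap_msg->length = sizeof(struct type6_hdr) + CEIL4(250) + 1000 = 88 + 252 + 1000 =
1340 bytes, well below the 12 KB PCIXCC_MAX_XCRB_MESSAGE_SIZE limit. On the reply
side, control blocks over 5 KB and data blocks over 11 KB are rejected outright,
and the reply control-block length is capped if the combined reply would not fit
the 12 KB response buffer.
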
357/**
358 * Copy results from a type 86 ICA reply message back to user space.
359 *
360 * @zdev: crypto device pointer
361 * @reply: reply AP message.
362 * @data: pointer to user output data
363 * @length: size of user output data
364 *
365 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
366 */
367struct type86x_reply {
368 struct type86_hdr hdr;
369 struct type86_fmt2_ext fmt2;
370 struct CPRBX cprbx;
371 unsigned char pad[4]; /* 4 byte function code/rules block ? */
372 unsigned short length;
373 char text[0];
374} __attribute__((packed));
375
376static int convert_type86_ica(struct zcrypt_device *zdev,
377 struct ap_message *reply,
378 char __user *outputdata,
379 unsigned int outputdatalength)
380{
381 static unsigned char static_pad[] = {
382 0x00,0x02,
383 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
384 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
385 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
386 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
387 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
388 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
389 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
390 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
391 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
392 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
393 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
394 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
395 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
396 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
397 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
398 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
399 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
400 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
401 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
402 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
403 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
404 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
405 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
406 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
407 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
408 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
409 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
410 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
411 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
412 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
413 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
414 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
415 };
416 struct type86x_reply *msg = reply->message;
417 unsigned short service_rc, service_rs;
418 unsigned int reply_len, pad_len;
419 char *data;
420
421 service_rc = msg->cprbx.ccp_rtcode;
422 if (unlikely(service_rc != 0)) {
423 service_rs = msg->cprbx.ccp_rscode;
424 if (service_rc == 8 && service_rs == 66) {
425 PDEBUG("Bad block format on PCIXCC/CEX2C\n");
426 return -EINVAL;
427 }
428 if (service_rc == 8 && service_rs == 65) {
429 PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n");
430 return -EINVAL;
431 }
432 if (service_rc == 8 && service_rs == 770) {
433 PDEBUG("Invalid key length on PCIXCC/CEX2C\n");
434 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
435 return -EAGAIN;
436 }
437 if (service_rc == 8 && service_rs == 783) {
438 PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n");
439 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
440 return -EAGAIN;
441 }
442 PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n",
443 service_rc, service_rs);
444 zdev->online = 0;
445 return -EAGAIN; /* repeat the request on a different device. */
446 }
447 data = msg->text;
448 reply_len = msg->length - 2;
449 if (reply_len > outputdatalength)
450 return -EINVAL;
451 /**
452 * For all encipher requests, the length of the ciphertext (reply_len)
453 * will always equal the modulus length. For MEX decipher requests
454 * the output needs to get padded. Minimum pad size is 10.
455 *
456	 * Currently, the cases where padding will be added are:
457 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
458 * ZERO-PAD and CRT is only supported for PKD requests)
459 * - PCICC, always
460 */
461 pad_len = outputdatalength - reply_len;
462 if (pad_len > 0) {
463 if (pad_len < 10)
464 return -EINVAL;
465 /* 'restore' padding left in the PCICC/PCIXCC card. */
466 if (copy_to_user(outputdata, static_pad, pad_len - 1))
467 return -EFAULT;
468 if (put_user(0, outputdata + pad_len - 1))
469 return -EFAULT;
470 }
471 /* Copy the crypto response to user space. */
472 if (copy_to_user(outputdata + pad_len, data, reply_len))
473 return -EFAULT;
474 return 0;
475}
476
477/**
478 * Copy results from a type 86 XCRB reply message back to user space.
479 *
480 * @zdev: crypto device pointer
481 * @reply: reply AP message.
482 * @xcRB: pointer to XCRB
483 *
484 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
485 */
486static int convert_type86_xcrb(struct zcrypt_device *zdev,
487 struct ap_message *reply,
488 struct ica_xcRB *xcRB)
489{
490 struct type86_fmt2_msg *msg = reply->message;
491 char *data = reply->message;
492
493 /* Copy CPRB to user */
494 if (copy_to_user(xcRB->reply_control_blk_addr,
495 data + msg->fmt2.offset1, msg->fmt2.count1))
496 return -EFAULT;
497 xcRB->reply_control_blk_length = msg->fmt2.count1;
498
499 /* Copy data buffer to user */
500 if (msg->fmt2.count2)
501 if (copy_to_user(xcRB->reply_data_addr,
502 data + msg->fmt2.offset2, msg->fmt2.count2))
503 return -EFAULT;
504 xcRB->reply_data_length = msg->fmt2.count2;
505 return 0;
506}
507
508static int convert_response_ica(struct zcrypt_device *zdev,
509 struct ap_message *reply,
510 char __user *outputdata,
511 unsigned int outputdatalength)
512{
513 struct type86x_reply *msg = reply->message;
514
515 /* Response type byte is the second byte in the response. */
516 switch (((unsigned char *) reply->message)[1]) {
517 case TYPE82_RSP_CODE:
518 case TYPE88_RSP_CODE:
519 return convert_error(zdev, reply);
520 case TYPE86_RSP_CODE:
521 if (msg->hdr.reply_code)
522 return convert_error(zdev, reply);
523 if (msg->cprbx.cprb_ver_id == 0x02)
524 return convert_type86_ica(zdev, reply,
525 outputdata, outputdatalength);
526 /* no break, incorrect cprb version is an unknown response */
527 default: /* Unknown response type, this should NEVER EVER happen */
528 PRINTK("Unrecognized Message Header: %08x%08x\n",
529 *(unsigned int *) reply->message,
530 *(unsigned int *) (reply->message+4));
531 zdev->online = 0;
532 return -EAGAIN; /* repeat the request on a different device. */
533 }
534}
535
536static int convert_response_xcrb(struct zcrypt_device *zdev,
537 struct ap_message *reply,
538 struct ica_xcRB *xcRB)
539{
540 struct type86x_reply *msg = reply->message;
541
542 /* Response type byte is the second byte in the response. */
543 switch (((unsigned char *) reply->message)[1]) {
544 case TYPE82_RSP_CODE:
545 case TYPE88_RSP_CODE:
546 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
547 return convert_error(zdev, reply);
548 case TYPE86_RSP_CODE:
549 if (msg->hdr.reply_code) {
550 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
551 return convert_error(zdev, reply);
552 }
553 if (msg->cprbx.cprb_ver_id == 0x02)
554 return convert_type86_xcrb(zdev, reply, xcRB);
555 /* no break, incorrect cprb version is an unknown response */
556 default: /* Unknown response type, this should NEVER EVER happen */
557 PRINTK("Unrecognized Message Header: %08x%08x\n",
558 *(unsigned int *) reply->message,
559 *(unsigned int *) (reply->message+4));
560 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
561 zdev->online = 0;
562 return -EAGAIN; /* repeat the request on a different device. */
563 }
564}
565
566/**
567 * This function is called from the AP bus code after a crypto request
568 * "msg" has finished with the reply message "reply".
569 * It is called from tasklet context.
570 * @ap_dev: pointer to the AP device
571 * @msg: pointer to the AP message
572 * @reply: pointer to the AP reply message
573 */
574static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
575 struct ap_message *msg,
576 struct ap_message *reply)
577{
578 static struct error_hdr error_reply = {
579 .type = TYPE82_RSP_CODE,
580 .reply_code = REP82_ERROR_MACHINE_FAILURE,
581 };
582 struct response_type *resp_type =
583 (struct response_type *) msg->private;
584 struct type86x_reply *t86r = reply->message;
585 int length;
586
587 /* Copy the reply message to the request message buffer. */
588 if (IS_ERR(reply))
589 memcpy(msg->message, &error_reply, sizeof(error_reply));
590 else if (t86r->hdr.type == TYPE86_RSP_CODE &&
591 t86r->cprbx.cprb_ver_id == 0x02) {
592 switch (resp_type->type) {
593 case PCIXCC_RESPONSE_TYPE_ICA:
594 length = sizeof(struct type86x_reply)
595 + t86r->length - 2;
596 length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
597 memcpy(msg->message, reply->message, length);
598 break;
599 case PCIXCC_RESPONSE_TYPE_XCRB:
600 length = t86r->fmt2.offset2 + t86r->fmt2.count2;
601 length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
602 memcpy(msg->message, reply->message, length);
603 break;
604 default:
605 PRINTK("Invalid internal response type: %i\n",
606 resp_type->type);
607 memcpy(msg->message, &error_reply,
608 sizeof error_reply);
609 }
610 } else
611 memcpy(msg->message, reply->message, sizeof error_reply);
612 complete(&(resp_type->work));
613}
614
615static atomic_t zcrypt_step = ATOMIC_INIT(0);
616
617/**
618 * The request distributor calls this function if it picked the PCIXCC/CEX2C
619 * device to handle a modexpo request.
620 * @zdev: pointer to zcrypt_device structure that identifies the
621 * PCIXCC/CEX2C device to the request distributor
622 * @mex: pointer to the modexpo request buffer
623 */
624static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
625 struct ica_rsa_modexpo *mex)
626{
627 struct ap_message ap_msg;
628 struct response_type resp_type = {
629 .type = PCIXCC_RESPONSE_TYPE_ICA,
630 };
631 int rc;
632
633 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
634 if (!ap_msg.message)
635 return -ENOMEM;
636 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
637 atomic_inc_return(&zcrypt_step);
638 ap_msg.private = &resp_type;
639 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
640 if (rc)
641 goto out_free;
642 init_completion(&resp_type.work);
643 ap_queue_message(zdev->ap_dev, &ap_msg);
644 rc = wait_for_completion_interruptible_timeout(
645 &resp_type.work, PCIXCC_CLEANUP_TIME);
646 if (rc > 0)
647 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
648 mex->outputdatalength);
649 else {
650 /* Signal pending or message timed out. */
651 ap_cancel_message(zdev->ap_dev, &ap_msg);
652 if (rc == 0)
653 /* Message timed out. */
654 rc = -ETIME;
655 }
656out_free:
657 free_page((unsigned long) ap_msg.message);
658 return rc;
659}
660
661/**
662 * The request distributor calls this function if it picked the PCIXCC/CEX2C
663 * device to handle a modexpo_crt request.
664 * @zdev: pointer to zcrypt_device structure that identifies the
665 * PCIXCC/CEX2C device to the request distributor
666 * @crt: pointer to the modexpo_crt request buffer
667 */
668static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
669 struct ica_rsa_modexpo_crt *crt)
670{
671 struct ap_message ap_msg;
672 struct response_type resp_type = {
673 .type = PCIXCC_RESPONSE_TYPE_ICA,
674 };
675 int rc;
676
677 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
678 if (!ap_msg.message)
679 return -ENOMEM;
680 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
681 atomic_inc_return(&zcrypt_step);
682 ap_msg.private = &resp_type;
683 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
684 if (rc)
685 goto out_free;
686 init_completion(&resp_type.work);
687 ap_queue_message(zdev->ap_dev, &ap_msg);
688 rc = wait_for_completion_interruptible_timeout(
689 &resp_type.work, PCIXCC_CLEANUP_TIME);
690 if (rc > 0)
691 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
692 crt->outputdatalength);
693 else {
694 /* Signal pending or message timed out. */
695 ap_cancel_message(zdev->ap_dev, &ap_msg);
696 if (rc == 0)
697 /* Message timed out. */
698 rc = -ETIME;
699 }
700out_free:
701 free_page((unsigned long) ap_msg.message);
702 return rc;
703}
704
705/**
706 * The request distributor calls this function if it picked the PCIXCC/CEX2C
707 * device to handle a send_cprb request.
708 * @zdev: pointer to zcrypt_device structure that identifies the
709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer
711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
713{
714 struct ap_message ap_msg;
715 struct response_type resp_type = {
716 .type = PCIXCC_RESPONSE_TYPE_XCRB,
717 };
718 int rc;
719
720 ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
721 if (!ap_msg.message)
722 return -ENOMEM;
723 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
724 atomic_inc_return(&zcrypt_step);
725 ap_msg.private = &resp_type;
726 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
727 if (rc)
728 goto out_free;
729 init_completion(&resp_type.work);
730 ap_queue_message(zdev->ap_dev, &ap_msg);
731 rc = wait_for_completion_interruptible_timeout(
732 &resp_type.work, PCIXCC_CLEANUP_TIME);
733 if (rc > 0)
734 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
735 else {
736 /* Signal pending or message timed out. */
737 ap_cancel_message(zdev->ap_dev, &ap_msg);
738 if (rc == 0)
739 /* Message timed out. */
740 rc = -ETIME;
741 }
742out_free:
743 memset(ap_msg.message, 0x0, ap_msg.length);
744 kfree(ap_msg.message);
745 return rc;
746}
747
748/**
749 * The crypto operations for a PCIXCC/CEX2C card.
750 */
751static struct zcrypt_ops zcrypt_pcixcc_ops = {
752 .rsa_modexpo = zcrypt_pcixcc_modexpo,
753 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
754 .send_cprb = zcrypt_pcixcc_send_cprb,
755};
756
757/**
758 * Microcode detection function. It sends a canned test message to a PCIXCC
759 * card and derives the microcode level from the card's reply.
760 * @ap_dev: pointer to the AP device.
761 */
762static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
763{
764 static unsigned char msg[] = {
765 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
766 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
767 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
768 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
769 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
770 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
771 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
772 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
773 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
774 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
775 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
776 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
777 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
778 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
779 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
780 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
781 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
782 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
783 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
784 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
785 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
786 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
787 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
788 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
789 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
790 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
791 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
792 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
793 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
794 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
795 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
796 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
797 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
798 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
799 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
800 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
801 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
802 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
803 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
804 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
805 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
806 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
807 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
808 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
809 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
810 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
811 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
812 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
813 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
814 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
815 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
816 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
817 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
818 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
819 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
820 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
821 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
822 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
823 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
824 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
825 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
826 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
827 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
828 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
829 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
830 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
831 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
832 0xF1,0x3D,0x93,0x53
833 };
834 unsigned long long psmid;
835 struct CPRBX *cprbx;
836 char *reply;
837 int rc, i;
838
839 reply = (void *) get_zeroed_page(GFP_KERNEL);
840 if (!reply)
841 return -ENOMEM;
842
843 rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
844 if (rc)
845 goto out_free;
846
847 /* Wait for the test message to complete. */
848 for (i = 0; i < 6; i++) {
849 mdelay(300);
850 rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
851 if (rc == 0 && psmid == 0x0102030405060708ULL)
852 break;
853 }
854
855 if (i >= 6) {
856 /* Got no answer. */
857 rc = -ENODEV;
858 goto out_free;
859 }
860
861 cprbx = (struct CPRBX *) (reply + 48);
862 if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
863 rc = ZCRYPT_PCIXCC_MCL2;
864 else
865 rc = ZCRYPT_PCIXCC_MCL3;
866out_free:
867 free_page((unsigned long) reply);
868 return rc;
869}
870
871/**
872 * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
873 * since the bus_match already checked the hardware type. The PCIXCC
874 * cards come in two flavours: microcode level 2 and microcode level 3.
875 * This is checked by sending a test message to the device.
876 * @ap_dev: pointer to the AP device.
877 */
878static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
879{
880 struct zcrypt_device *zdev;
881 int rc;
882
883 zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
884 if (!zdev)
885 return -ENOMEM;
886 zdev->ap_dev = ap_dev;
887 zdev->ops = &zcrypt_pcixcc_ops;
888 zdev->online = 1;
889 if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
890 rc = zcrypt_pcixcc_mcl(ap_dev);
891 if (rc < 0) {
892 zcrypt_device_free(zdev);
893 return rc;
894 }
895 zdev->user_space_type = rc;
896 if (rc == ZCRYPT_PCIXCC_MCL2) {
897 zdev->type_string = "PCIXCC_MCL2";
898 zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
899 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
900 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
901 } else {
902 zdev->type_string = "PCIXCC_MCL3";
903 zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
904 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
905 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
906 }
907 } else {
908 zdev->user_space_type = ZCRYPT_CEX2C;
909 zdev->type_string = "CEX2C";
910 zdev->speed_rating = CEX2C_SPEED_RATING;
911 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
912 zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
913 }
914 ap_dev->reply = &zdev->reply;
915 ap_dev->private = zdev;
916 rc = zcrypt_device_register(zdev);
917 if (rc)
918 goto out_free;
919 return 0;
920
921 out_free:
922 ap_dev->private = NULL;
923 zcrypt_device_free(zdev);
924 return rc;
925}
926
927/**
928 * This is called to remove the extended PCIXCC/CEX2C driver information
929 * if an AP device is removed.
930 */
931static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
932{
933 struct zcrypt_device *zdev = ap_dev->private;
934
935 zcrypt_device_unregister(zdev);
936}
937
938int __init zcrypt_pcixcc_init(void)
939{
940 return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
941}
942
943void zcrypt_pcixcc_exit(void)
944{
945 ap_driver_unregister(&zcrypt_pcixcc_driver);
946}
947
948#ifndef CONFIG_ZCRYPT_MONOLITHIC
949module_init(zcrypt_pcixcc_init);
950module_exit(zcrypt_pcixcc_exit);
951#endif
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
new file mode 100644
index 000000000000..a78ff307fd19
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -0,0 +1,79 @@
1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.h
3 *
4 * zcrypt 2.1.0
5 *
6 * Copyright (C) 2001, 2006 IBM Corporation
7 * Author(s): Robert Burroughs
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef _ZCRYPT_PCIXCC_H_
29#define _ZCRYPT_PCIXCC_H_
30
31/**
32 * CPRBX
33 * Note that all shorts and ints are big-endian.
 34 * All pointer fields are 16 bytes long; their contents are ignored.
35 *
36 * A request CPRB is followed by a request_parameter_block.
37 *
38 * The request (or reply) parameter block is organized thus:
39 * function code
40 * VUD block
41 * key block
42 */
43struct CPRBX {
44 unsigned short cprb_len; /* CPRB length 220 */
45 unsigned char cprb_ver_id; /* CPRB version id. 0x02 */
46 unsigned char pad_000[3]; /* Alignment pad bytes */
47 unsigned char func_id[2]; /* function id 0x5432 */
48 unsigned char cprb_flags[4]; /* Flags */
49 unsigned int req_parml; /* request parameter buffer len */
 50 unsigned int req_datal; /* request data buffer len */
51 unsigned int rpl_msgbl; /* reply message block length */
52 unsigned int rpld_parml; /* replied parameter block len */
53 unsigned int rpl_datal; /* reply data block len */
54 unsigned int rpld_datal; /* replied data block len */
55 unsigned int req_extbl; /* request extension block len */
56 unsigned char pad_001[4]; /* reserved */
57 unsigned int rpld_extbl; /* replied extension block len */
58 unsigned char req_parmb[16]; /* request parm block 'address' */
59 unsigned char req_datab[16]; /* request data block 'address' */
60 unsigned char rpl_parmb[16]; /* reply parm block 'address' */
61 unsigned char rpl_datab[16]; /* reply data block 'address' */
62 unsigned char req_extb[16]; /* request extension block 'addr'*/
 63 unsigned char rpl_extb[16]; /* reply extension block 'addr' */
64 unsigned short ccp_rtcode; /* server return code */
65 unsigned short ccp_rscode; /* server reason code */
66 unsigned int mac_data_len; /* Mac Data Length */
67 unsigned char logon_id[8]; /* Logon Identifier */
68 unsigned char mac_value[8]; /* Mac Value */
69 unsigned char mac_content_flgs;/* Mac content flag byte */
70 unsigned char pad_002; /* Alignment */
71 unsigned short domain; /* Domain */
72 unsigned char pad_003[12]; /* Domain masks */
73 unsigned char pad_004[36]; /* reserved */
74} __attribute__((packed));
75
76int zcrypt_pcixcc_init(void);
77void zcrypt_pcixcc_exit(void);
78
79#endif /* _ZCRYPT_PCIXCC_H_ */
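
The fixed CPRBX header above is followed by the variable parameter block (function code, VUD block, key block). Below is a minimal sketch of how the fixed part could be initialized, using only values taken from the field comments; the helper is hypothetical and not part of the patch, it assumes the struct definition above is in scope, and it relies on s390 being big-endian so that plain stores already match the required byte order.

#include <string.h>

static void cprbx_init(struct CPRBX *cprbx, unsigned int parm_len,
                       unsigned short domain)
{
        memset(cprbx, 0, sizeof(*cprbx));
        cprbx->cprb_len = sizeof(*cprbx);       /* 220, per the comment */
        cprbx->cprb_ver_id = 0x02;
        cprbx->func_id[0] = 0x54;               /* function id 0x5432 */
        cprbx->func_id[1] = 0x32;
        cprbx->req_parml = parm_len;            /* length of the parameter
                                                 * block that follows */
        cprbx->domain = domain;
}
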
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 5399c5d99b81..a914129a4da9 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -19,9 +19,6 @@
19 19
20#include "s390mach.h" 20#include "s390mach.h"
21 21
22#define DBG printk
23// #define DBG(args,...) do {} while (0);
24
25static struct semaphore m_sem; 22static struct semaphore m_sem;
26 23
27extern int css_process_crw(int, int); 24extern int css_process_crw(int, int);
@@ -83,11 +80,11 @@ repeat:
83 ccode = stcrw(&crw[chain]); 80 ccode = stcrw(&crw[chain]);
84 if (ccode != 0) 81 if (ccode != 0)
85 break; 82 break;
86 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " 83 printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
87 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 84 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
88 crw[chain].slct, crw[chain].oflw, crw[chain].chn, 85 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
89 crw[chain].rsc, crw[chain].anc, crw[chain].erc, 86 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
90 crw[chain].rsid); 87 crw[chain].rsid);
91 /* Check for overflows. */ 88 /* Check for overflows. */
92 if (crw[chain].oflw) { 89 if (crw[chain].oflw) {
93 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 90 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
@@ -117,8 +114,8 @@ repeat:
117 * reported to the common I/O layer. 114 * reported to the common I/O layer.
118 */ 115 */
119 if (crw[chain].slct) { 116 if (crw[chain].slct) {
120 DBG(KERN_INFO"solicited machine check for " 117 pr_debug("solicited machine check for "
121 "channel path %02X\n", crw[0].rsid); 118 "channel path %02X\n", crw[0].rsid);
122 break; 119 break;
123 } 120 }
124 switch (crw[0].erc) { 121 switch (crw[0].erc) {
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index adc9d8f2c28f..5d39b2df0cc4 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -189,6 +189,10 @@ struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
189 struct zfcp_fsf_req *request, *tmp; 189 struct zfcp_fsf_req *request, *tmp;
190 unsigned int i; 190 unsigned int i;
191 191
192 /* 0 is reserved as an invalid req_id */
193 if (req_id == 0)
194 return NULL;
195
192 i = req_id % REQUEST_LIST_SIZE; 196 i = req_id % REQUEST_LIST_SIZE;
193 197
194 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) 198 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
@@ -299,11 +303,45 @@ zfcp_init_device_configure(void)
299 return; 303 return;
300} 304}
301 305
306static int calc_alignment(int size)
307{
308 int align = 1;
309
310 if (!size)
311 return 0;
312
313 while ((size - align) > 0)
314 align <<= 1;
315
316 return align;
317}
318
302static int __init 319static int __init
303zfcp_module_init(void) 320zfcp_module_init(void)
304{ 321{
322 int retval = -ENOMEM;
323 int size, align;
324
325 size = sizeof(struct zfcp_fsf_req_qtcb);
326 align = calc_alignment(size);
327 zfcp_data.fsf_req_qtcb_cache =
328 kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
329 if (!zfcp_data.fsf_req_qtcb_cache)
330 goto out;
305 331
306 int retval = 0; 332 size = sizeof(struct fsf_status_read_buffer);
333 align = calc_alignment(size);
334 zfcp_data.sr_buffer_cache =
335 kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
336 if (!zfcp_data.sr_buffer_cache)
337 goto out_sr_cache;
338
339 size = sizeof(struct zfcp_gid_pn_data);
340 align = calc_alignment(size);
341 zfcp_data.gid_pn_cache =
342 kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
343 if (!zfcp_data.gid_pn_cache)
344 goto out_gid_cache;
307 345
308 atomic_set(&zfcp_data.loglevel, loglevel); 346 atomic_set(&zfcp_data.loglevel, loglevel);
309 347
@@ -313,15 +351,16 @@ zfcp_module_init(void)
313 /* initialize adapters to be removed list head */ 351 /* initialize adapters to be removed list head */
314 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh); 352 INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
315 353
316 zfcp_transport_template = fc_attach_transport(&zfcp_transport_functions); 354 zfcp_data.scsi_transport_template =
317 if (!zfcp_transport_template) 355 fc_attach_transport(&zfcp_transport_functions);
318 return -ENODEV; 356 if (!zfcp_data.scsi_transport_template)
357 goto out_transport;
319 358
320 retval = misc_register(&zfcp_cfdc_misc); 359 retval = misc_register(&zfcp_cfdc_misc);
321 if (retval != 0) { 360 if (retval != 0) {
322 ZFCP_LOG_INFO("registration of misc device " 361 ZFCP_LOG_INFO("registration of misc device "
323 "zfcp_cfdc failed\n"); 362 "zfcp_cfdc failed\n");
324 goto out; 363 goto out_misc;
325 } 364 }
326 365
327 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n", 366 ZFCP_LOG_TRACE("major/minor for zfcp_cfdc: %d/%d\n",
@@ -333,9 +372,6 @@ zfcp_module_init(void)
333 /* initialise configuration rw lock */ 372 /* initialise configuration rw lock */
334 rwlock_init(&zfcp_data.config_lock); 373 rwlock_init(&zfcp_data.config_lock);
335 374
336 /* save address of data structure managing the driver module */
337 zfcp_data.scsi_host_template.module = THIS_MODULE;
338
339 /* setup dynamic I/O */ 375 /* setup dynamic I/O */
340 retval = zfcp_ccw_register(); 376 retval = zfcp_ccw_register();
341 if (retval) { 377 if (retval) {
@@ -350,6 +386,14 @@ zfcp_module_init(void)
350 386
351 out_ccw_register: 387 out_ccw_register:
352 misc_deregister(&zfcp_cfdc_misc); 388 misc_deregister(&zfcp_cfdc_misc);
389 out_misc:
390 fc_release_transport(zfcp_data.scsi_transport_template);
391 out_transport:
392 kmem_cache_destroy(zfcp_data.gid_pn_cache);
393 out_gid_cache:
394 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
395 out_sr_cache:
396 kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
353 out: 397 out:
354 return retval; 398 return retval;
355} 399}
@@ -935,20 +979,20 @@ static int
935zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 979zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
936{ 980{
937 adapter->pool.fsf_req_erp = 981 adapter->pool.fsf_req_erp =
938 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR, 982 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
939 sizeof(struct zfcp_fsf_req_pool_element)); 983 zfcp_data.fsf_req_qtcb_cache);
940 if (!adapter->pool.fsf_req_erp) 984 if (!adapter->pool.fsf_req_erp)
941 return -ENOMEM; 985 return -ENOMEM;
942 986
943 adapter->pool.fsf_req_scsi = 987 adapter->pool.fsf_req_scsi =
944 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, 988 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
945 sizeof(struct zfcp_fsf_req_pool_element)); 989 zfcp_data.fsf_req_qtcb_cache);
946 if (!adapter->pool.fsf_req_scsi) 990 if (!adapter->pool.fsf_req_scsi)
947 return -ENOMEM; 991 return -ENOMEM;
948 992
949 adapter->pool.fsf_req_abort = 993 adapter->pool.fsf_req_abort =
950 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, 994 mempool_create_slab_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
951 sizeof(struct zfcp_fsf_req_pool_element)); 995 zfcp_data.fsf_req_qtcb_cache);
952 if (!adapter->pool.fsf_req_abort) 996 if (!adapter->pool.fsf_req_abort)
953 return -ENOMEM; 997 return -ENOMEM;
954 998
@@ -959,14 +1003,14 @@ zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
959 return -ENOMEM; 1003 return -ENOMEM;
960 1004
961 adapter->pool.data_status_read = 1005 adapter->pool.data_status_read =
962 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, 1006 mempool_create_slab_pool(ZFCP_POOL_STATUS_READ_NR,
963 sizeof(struct fsf_status_read_buffer)); 1007 zfcp_data.sr_buffer_cache);
964 if (!adapter->pool.data_status_read) 1008 if (!adapter->pool.data_status_read)
965 return -ENOMEM; 1009 return -ENOMEM;
966 1010
967 adapter->pool.data_gid_pn = 1011 adapter->pool.data_gid_pn =
968 mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR, 1012 mempool_create_slab_pool(ZFCP_POOL_DATA_GID_PN_NR,
969 sizeof(struct zfcp_gid_pn_data)); 1013 zfcp_data.gid_pn_cache);
970 if (!adapter->pool.data_gid_pn) 1014 if (!adapter->pool.data_gid_pn)
971 return -ENOMEM; 1015 return -ENOMEM;
972 1016
@@ -1091,9 +1135,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1091 /* initialize lock of associated request queue */ 1135 /* initialize lock of associated request queue */
1092 rwlock_init(&adapter->request_queue.queue_lock); 1136 rwlock_init(&adapter->request_queue.queue_lock);
1093 1137
1094 /* intitialise SCSI ER timer */
1095 init_timer(&adapter->scsi_er_timer);
1096
1097 /* mark adapter unusable as long as sysfs registration is not complete */ 1138 /* mark adapter unusable as long as sysfs registration is not complete */
1098 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 1139 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
1099 1140
@@ -1609,7 +1650,6 @@ zfcp_ns_gid_pn_request(struct zfcp_erp_action *erp_action)
1609 gid_pn->ct.handler = zfcp_ns_gid_pn_handler; 1650 gid_pn->ct.handler = zfcp_ns_gid_pn_handler;
1610 gid_pn->ct.handler_data = (unsigned long) gid_pn; 1651 gid_pn->ct.handler_data = (unsigned long) gid_pn;
1611 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 1652 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
1612 gid_pn->ct.timer = &erp_action->timer;
1613 gid_pn->port = erp_action->port; 1653 gid_pn->port = erp_action->port;
1614 1654
1615 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, 1655 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
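
The calc_alignment() helper added to zfcp_aux.c above returns the smallest power of two that is not smaller than the object size; zfcp_module_init() passes it to kmem_cache_create() as the slab alignment, so every cached object starts on a boundary at least as large as the object itself. A quick userspace restatement with a few worked values (illustration only, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Same logic as calc_alignment() in zfcp_aux.c above. */
static int calc_alignment(int size)
{
        int align = 1;

        if (!size)
                return 0;

        while ((size - align) > 0)
                align <<= 1;

        return align;
}

int main(void)
{
        assert(calc_alignment(0) == 0);
        assert(calc_alignment(1) == 1);
        assert(calc_alignment(24) == 32);
        assert(calc_alignment(4096) == 4096);   /* powers of two map to themselves */
        printf("alignment for a 200-byte object: %d\n", calc_alignment(200)); /* 256 */
        return 0;
}
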
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index fdabadeaa9ee..81680efa1721 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -275,19 +275,6 @@ zfcp_ccw_register(void)
275} 275}
276 276
277/** 277/**
278 * zfcp_ccw_unregister - ccw unregister function
279 *
280 * Unregisters the driver from common i/o layer. Function will be called at
281 * module unload/system shutdown.
282 */
283void __exit
284zfcp_ccw_unregister(void)
285{
286 zfcp_sysfs_driver_remove_files(&zfcp_ccw_driver.driver);
287 ccw_driver_unregister(&zfcp_ccw_driver);
288}
289
290/**
291 * zfcp_ccw_shutdown - gets called on reboot/shutdown 278 * zfcp_ccw_shutdown - gets called on reboot/shutdown
292 * 279 *
293 * Makes sure that QDIO queues are down when the system gets stopped. 280 * Makes sure that QDIO queues are down when the system gets stopped.
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index c033145d0f19..0aa3b1ac76af 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -707,7 +707,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
709 struct zfcp_fsf_req *fsf_req, 709 struct zfcp_fsf_req *fsf_req,
710 struct zfcp_fsf_req *old_fsf_req) 710 unsigned long old_req_id)
711{ 711{
712 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; 712 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
713 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; 713 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
@@ -768,8 +768,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
768 rec->fsf_seqno = fsf_req->seq_no; 768 rec->fsf_seqno = fsf_req->seq_no;
769 rec->fsf_issued = fsf_req->issued; 769 rec->fsf_issued = fsf_req->issued;
770 } 770 }
771 rec->type.old_fsf_reqid = 771 rec->type.old_fsf_reqid = old_req_id;
772 (unsigned long) old_fsf_req;
773 } else { 772 } else {
774 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); 773 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
775 dump->total_size = buflen; 774 dump->total_size = buflen;
@@ -794,17 +793,17 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
794 struct zfcp_fsf_req *fsf_req) 793 struct zfcp_fsf_req *fsf_req)
795{ 794{
796 _zfcp_scsi_dbf_event_common("rslt", tag, level, 795 _zfcp_scsi_dbf_event_common("rslt", tag, level,
797 adapter, scsi_cmnd, fsf_req, NULL); 796 adapter, scsi_cmnd, fsf_req, 0);
798} 797}
799 798
800inline void 799inline void
801zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
802 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
803 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
804 struct zfcp_fsf_req *old_fsf_req) 803 unsigned long old_req_id)
805{ 804{
806 _zfcp_scsi_dbf_event_common("abrt", tag, 1, 805 _zfcp_scsi_dbf_event_common("abrt", tag, 1,
807 adapter, scsi_cmnd, new_fsf_req, old_fsf_req); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
808} 807}
809 808
810inline void 809inline void
@@ -814,7 +813,7 @@ zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
814 struct zfcp_adapter *adapter = unit->port->adapter; 813 struct zfcp_adapter *adapter = unit->port->adapter;
815 814
816 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst", 815 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
817 tag, 1, adapter, scsi_cmnd, NULL, NULL); 816 tag, 1, adapter, scsi_cmnd, NULL, 0);
818} 817}
819 818
820static int 819static int
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 94d1b74db356..8f882690994d 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -19,7 +19,6 @@
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 20 */
21 21
22
23#ifndef ZFCP_DEF_H 22#ifndef ZFCP_DEF_H
24#define ZFCP_DEF_H 23#define ZFCP_DEF_H
25 24
@@ -32,6 +31,10 @@
32#include <linux/blkdev.h> 31#include <linux/blkdev.h>
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/slab.h>
35#include <linux/mempool.h>
36#include <linux/syscalls.h>
37#include <linux/ioctl.h>
35#include <scsi/scsi.h> 38#include <scsi/scsi.h>
36#include <scsi/scsi_tcq.h> 39#include <scsi/scsi_tcq.h>
37#include <scsi/scsi_cmnd.h> 40#include <scsi/scsi_cmnd.h>
@@ -39,14 +42,11 @@
39#include <scsi/scsi_host.h> 42#include <scsi/scsi_host.h>
40#include <scsi/scsi_transport.h> 43#include <scsi/scsi_transport.h>
41#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
42#include "zfcp_fsf.h"
43#include <asm/ccwdev.h> 45#include <asm/ccwdev.h>
44#include <asm/qdio.h> 46#include <asm/qdio.h>
45#include <asm/debug.h> 47#include <asm/debug.h>
46#include <asm/ebcdic.h> 48#include <asm/ebcdic.h>
47#include <linux/mempool.h> 49#include "zfcp_fsf.h"
48#include <linux/syscalls.h>
49#include <linux/ioctl.h>
50 50
51 51
52/********************* GENERAL DEFINES *********************************/ 52/********************* GENERAL DEFINES *********************************/
@@ -137,7 +137,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
137#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 137#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
138 138
139/* timeout value for "default timer" for fsf requests */ 139/* timeout value for "default timer" for fsf requests */
140#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 140#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
141 141
142/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 142/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
143 143
@@ -543,7 +543,7 @@ do { \
543} while (0) 543} while (0)
544 544
545#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL 545#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL
546# define ZFCP_LOG_NORMAL(fmt, args...) 546# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0)
547#else 547#else
548# define ZFCP_LOG_NORMAL(fmt, args...) \ 548# define ZFCP_LOG_NORMAL(fmt, args...) \
549do { \ 549do { \
@@ -553,7 +553,7 @@ do { \
553#endif 553#endif
554 554
555#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO 555#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO
556# define ZFCP_LOG_INFO(fmt, args...) 556# define ZFCP_LOG_INFO(fmt, args...) do { } while (0)
557#else 557#else
558# define ZFCP_LOG_INFO(fmt, args...) \ 558# define ZFCP_LOG_INFO(fmt, args...) \
559do { \ 559do { \
@@ -563,14 +563,14 @@ do { \
563#endif 563#endif
564 564
565#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG 565#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG
566# define ZFCP_LOG_DEBUG(fmt, args...) 566# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0)
567#else 567#else
568# define ZFCP_LOG_DEBUG(fmt, args...) \ 568# define ZFCP_LOG_DEBUG(fmt, args...) \
569 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args) 569 ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args)
570#endif 570#endif
571 571
572#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE 572#if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE
573# define ZFCP_LOG_TRACE(fmt, args...) 573# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0)
574#else 574#else
575# define ZFCP_LOG_TRACE(fmt, args...) \ 575# define ZFCP_LOG_TRACE(fmt, args...) \
576 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args) 576 ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args)
@@ -779,7 +779,6 @@ typedef void (*zfcp_send_ct_handler_t)(unsigned long);
779 * @handler_data: data passed to handler function 779 * @handler_data: data passed to handler function
780 * @pool: pointer to memory pool for ct request structure 780 * @pool: pointer to memory pool for ct request structure
781 * @timeout: FSF timeout for this request 781 * @timeout: FSF timeout for this request
782 * @timer: timer (e.g. for request initiated by erp)
783 * @completion: completion for synchronization purposes 782 * @completion: completion for synchronization purposes
784 * @status: used to pass error status to calling function 783 * @status: used to pass error status to calling function
785 */ 784 */
@@ -793,7 +792,6 @@ struct zfcp_send_ct {
793 unsigned long handler_data; 792 unsigned long handler_data;
794 mempool_t *pool; 793 mempool_t *pool;
795 int timeout; 794 int timeout;
796 struct timer_list *timer;
797 struct completion *completion; 795 struct completion *completion;
798 int status; 796 int status;
799}; 797};
@@ -821,7 +819,6 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
821 * @resp_count: number of elements in response scatter-gather list 819 * @resp_count: number of elements in response scatter-gather list
822 * @handler: handler function (called for response to the request) 820 * @handler: handler function (called for response to the request)
823 * @handler_data: data passed to handler function 821 * @handler_data: data passed to handler function
824 * @timer: timer (e.g. for request initiated by erp)
825 * @completion: completion for synchronization purposes 822 * @completion: completion for synchronization purposes
826 * @ls_code: hex code of ELS command 823 * @ls_code: hex code of ELS command
827 * @status: used to pass error status to calling function 824 * @status: used to pass error status to calling function
@@ -836,7 +833,6 @@ struct zfcp_send_els {
836 unsigned int resp_count; 833 unsigned int resp_count;
837 zfcp_send_els_handler_t handler; 834 zfcp_send_els_handler_t handler;
838 unsigned long handler_data; 835 unsigned long handler_data;
839 struct timer_list *timer;
840 struct completion *completion; 836 struct completion *completion;
841 int ls_code; 837 int ls_code;
842 int status; 838 int status;
@@ -886,7 +882,6 @@ struct zfcp_adapter {
886 struct list_head port_remove_lh; /* head of ports to be 882 struct list_head port_remove_lh; /* head of ports to be
887 removed */ 883 removed */
888 u32 ports; /* number of remote ports */ 884 u32 ports; /* number of remote ports */
889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
890 atomic_t reqs_active; /* # active FSF reqs */ 885 atomic_t reqs_active; /* # active FSF reqs */
891 unsigned long req_no; /* unique FSF req number */ 886 unsigned long req_no; /* unique FSF req number */
892 struct list_head *req_list; /* list of pending reqs */ 887 struct list_head *req_list; /* list of pending reqs */
@@ -1003,6 +998,7 @@ struct zfcp_fsf_req {
1003 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 998 struct fsf_qtcb *qtcb; /* address of associated QTCB */
1004 u32 seq_no; /* Sequence number of request */ 999 u32 seq_no; /* Sequence number of request */
1005 unsigned long data; /* private data of request */ 1000 unsigned long data; /* private data of request */
1001 struct timer_list timer; /* used for erp or scsi er */
1006 struct zfcp_erp_action *erp_action; /* used if this request is 1002 struct zfcp_erp_action *erp_action; /* used if this request is
1007 issued on behalf of erp */ 1003 issued on behalf of erp */
1008 mempool_t *pool; /* used if request was allocated 1004 mempool_t *pool; /* used if request was allocated
@@ -1016,6 +1012,7 @@ typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
1016/* driver data */ 1012/* driver data */
1017struct zfcp_data { 1013struct zfcp_data {
1018 struct scsi_host_template scsi_host_template; 1014 struct scsi_host_template scsi_host_template;
1015 struct scsi_transport_template *scsi_transport_template;
1019 atomic_t status; /* Module status flags */ 1016 atomic_t status; /* Module status flags */
1020 struct list_head adapter_list_head; /* head of adapter list */ 1017 struct list_head adapter_list_head; /* head of adapter list */
1021 struct list_head adapter_remove_lh; /* head of adapters to be 1018 struct list_head adapter_remove_lh; /* head of adapters to be
@@ -1031,6 +1028,9 @@ struct zfcp_data {
1031 wwn_t init_wwpn; 1028 wwn_t init_wwpn;
1032 fcp_lun_t init_fcp_lun; 1029 fcp_lun_t init_fcp_lun;
1033 char *driver_version; 1030 char *driver_version;
1031 kmem_cache_t *fsf_req_qtcb_cache;
1032 kmem_cache_t *sr_buffer_cache;
1033 kmem_cache_t *gid_pn_cache;
1034}; 1034};
1035 1035
1036/** 1036/**
@@ -1051,7 +1051,7 @@ struct zfcp_sg_list {
1051#define ZFCP_POOL_DATA_GID_PN_NR 1 1051#define ZFCP_POOL_DATA_GID_PN_NR 1
1052 1052
1053/* struct used by memory pools for fsf_requests */ 1053/* struct used by memory pools for fsf_requests */
1054struct zfcp_fsf_req_pool_element { 1054struct zfcp_fsf_req_qtcb {
1055 struct zfcp_fsf_req fsf_req; 1055 struct zfcp_fsf_req fsf_req;
1056 struct fsf_qtcb qtcb; 1056 struct fsf_qtcb qtcb;
1057}; 1057};
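
The renamed zfcp_fsf_req_qtcb structure above bundles a request and its QTCB into a single object, so the new fsf_req_qtcb_cache (and the mempools built on it in zfcp_aux.c) hands both out with one allocation; zfcp_fsf_req_alloc() then simply points fsf_req->qtcb into the same block. A userspace sketch of that layout with stub types (illustration only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

struct fsf_qtcb { char payload[512]; };           /* stub; the real QTCB is larger */
struct zfcp_fsf_req { struct fsf_qtcb *qtcb; };   /* stub; the real struct has more fields */

struct zfcp_fsf_req_qtcb {
        struct zfcp_fsf_req fsf_req;
        struct fsf_qtcb qtcb;
};

int main(void)
{
        /* One allocation covers the request and its QTCB... */
        struct zfcp_fsf_req_qtcb *obj = calloc(1, sizeof(*obj));
        struct zfcp_fsf_req *req;

        if (!obj)
                return 1;
        req = &obj->fsf_req;
        req->qtcb = &obj->qtcb;         /* points into the same block */
        printf("qtcb starts %zu bytes after the request\n",
               (size_t)((char *)req->qtcb - (char *)req));
        /* ...and one free releases both, mirroring kmem_cache_free() of
         * zfcp_data.fsf_req_qtcb_cache in zfcp_fsf_req_free(). */
        free(obj);
        return 0;
}
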
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 7f60b6fdf724..862a411a4aa0 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,6 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); 64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); 65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); 66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
67static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
68static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); 67static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 68static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 69static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,6 +91,7 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); 91static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); 92static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
95 93
94static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
96static void zfcp_erp_action_dismiss_port(struct zfcp_port *); 95static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
97static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); 96static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
98static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); 97static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
@@ -111,64 +110,86 @@ static inline void zfcp_erp_action_to_ready(struct zfcp_erp_action *);
111static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *); 110static inline void zfcp_erp_action_to_running(struct zfcp_erp_action *);
112 111
113static void zfcp_erp_memwait_handler(unsigned long); 112static void zfcp_erp_memwait_handler(unsigned long);
114static void zfcp_erp_timeout_handler(unsigned long);
115static inline void zfcp_erp_timeout_init(struct zfcp_erp_action *);
116 113
117/** 114/**
118 * zfcp_fsf_request_timeout_handler - called if a request timed out 115 * zfcp_close_qdio - close qdio queues for an adapter
119 * @data: pointer to adapter for handler function
120 *
121 * This function needs to be called if requests (ELS, Generic Service,
122 * or SCSI commands) exceed a certain time limit. The assumption is
 123 * that after the time limit the adapter gets stuck. So we trigger a reopen of
124 * the adapter. This should not be used for error recovery, SCSI abort
125 * commands and SCSI requests from SCSI mid-layer.
126 */ 116 */
127void 117static void zfcp_close_qdio(struct zfcp_adapter *adapter)
128zfcp_fsf_request_timeout_handler(unsigned long data)
129{ 118{
130 struct zfcp_adapter *adapter; 119 struct zfcp_qdio_queue *req_queue;
120 int first, count;
131 121
132 adapter = (struct zfcp_adapter *) data; 122 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status))
123 return;
133 124
134 zfcp_erp_adapter_reopen(adapter, 0); 125 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
126 req_queue = &adapter->request_queue;
127 write_lock_irq(&req_queue->queue_lock);
128 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
129 write_unlock_irq(&req_queue->queue_lock);
130
131 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
132 while (qdio_shutdown(adapter->ccw_device,
133 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
134 msleep(1000);
135 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
136
137 /* cleanup used outbound sbals */
138 count = atomic_read(&req_queue->free_count);
139 if (count < QDIO_MAX_BUFFERS_PER_Q) {
140 first = (req_queue->free_index+count) % QDIO_MAX_BUFFERS_PER_Q;
141 count = QDIO_MAX_BUFFERS_PER_Q - count;
142 zfcp_qdio_zero_sbals(req_queue->buffer, first, count);
143 }
144 req_queue->free_index = 0;
145 atomic_set(&req_queue->free_count, 0);
146 req_queue->distance_from_int = 0;
147 adapter->response_queue.free_index = 0;
148 atomic_set(&adapter->response_queue.free_count, 0);
135} 149}
136 150
137/** 151/**
138 * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks 152 * zfcp_close_fsf - stop FSF operations for an adapter
139 * 153 *
140 * This function needs to be called whenever a SCSI error recovery 154 * Dismiss and cleanup all pending fsf_reqs (this wakes up all initiators of
141 * action (abort/reset) does not return. Re-opening the adapter means 155 * requests waiting for completion; especially this returns SCSI commands
142 * that the abort/reset command can be returned by zfcp. It won't complete 156 * with error state).
143 * via the adapter anymore (because qdio queues are closed). If ERP is
144 * already running on this adapter it will be stopped.
145 */ 157 */
146void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) 158static void zfcp_close_fsf(struct zfcp_adapter *adapter)
147{ 159{
148 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 160 /* close queues to ensure that buffers are not accessed by adapter */
149 unsigned long flags; 161 zfcp_close_qdio(adapter);
150 162 zfcp_fsf_req_dismiss_all(adapter);
151 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " 163 /* reset FSF request sequence number */
152 "Restarting all operations on the adapter %s\n", 164 adapter->fsf_req_seq_no = 0;
153 zfcp_get_busid_by_adapter(adapter)); 165 /* all ports and units are closed */
154 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); 166 zfcp_erp_modify_adapter_status(adapter,
167 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
168}
155 169
156 write_lock_irqsave(&adapter->erp_lock, flags); 170/**
157 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 171 * zfcp_fsf_request_timeout_handler - called if a request timed out
158 &adapter->status)) { 172 * @data: pointer to adapter for handler function
159 zfcp_erp_modify_adapter_status(adapter, 173 *
160 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, 174 * This function needs to be called if requests (ELS, Generic Service,
161 ZFCP_CLEAR); 175 * or SCSI commands) exceed a certain time limit. The assumption is
162 zfcp_erp_action_dismiss_adapter(adapter); 176 * that after the time limit the adapter get stuck. So we trigger a reopen of
163 write_unlock_irqrestore(&adapter->erp_lock, flags); 177 * the adapter.
164 /* dismiss all pending requests including requests for ERP */ 178 */
165 zfcp_fsf_req_dismiss_all(adapter); 179static void zfcp_fsf_request_timeout_handler(unsigned long data)
166 adapter->fsf_req_seq_no = 0; 180{
167 } else 181 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
168 write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0); 182 zfcp_erp_adapter_reopen(adapter, 0);
170} 183}
171 184
185void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
186{
187 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
188 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
189 fsf_req->timer.expires = timeout;
190 add_timer(&fsf_req->timer);
191}
192
172/* 193/*
173 * function: 194 * function:
174 * 195 *
@@ -282,7 +303,6 @@ zfcp_erp_adisc(struct zfcp_port *port)
282 struct zfcp_ls_adisc *adisc; 303 struct zfcp_ls_adisc *adisc;
283 void *address = NULL; 304 void *address = NULL;
284 int retval = 0; 305 int retval = 0;
285 struct timer_list *timer;
286 306
287 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC); 307 send_els = kzalloc(sizeof(struct zfcp_send_els), GFP_ATOMIC);
288 if (send_els == NULL) 308 if (send_els == NULL)
@@ -329,22 +349,11 @@ zfcp_erp_adisc(struct zfcp_port *port)
329 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 349 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
330 adisc->nport_id); 350 adisc->nport_id);
331 351
332 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC);
333 if (!timer)
334 goto nomem;
335
336 init_timer(timer);
337 timer->function = zfcp_fsf_request_timeout_handler;
338 timer->data = (unsigned long) adapter;
339 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
340 send_els->timer = timer;
341
342 retval = zfcp_fsf_send_els(send_els); 352 retval = zfcp_fsf_send_els(send_els);
343 if (retval != 0) { 353 if (retval != 0) {
344 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port " 354 ZFCP_LOG_NORMAL("error: initiation of Send ELS failed for port "
345 "0x%08x on adapter %s\n", send_els->d_id, 355 "0x%08x on adapter %s\n", send_els->d_id,
346 zfcp_get_busid_by_adapter(adapter)); 356 zfcp_get_busid_by_adapter(adapter));
347 del_timer(send_els->timer);
348 goto freemem; 357 goto freemem;
349 } 358 }
350 359
@@ -356,7 +365,6 @@ zfcp_erp_adisc(struct zfcp_port *port)
356 if (address != NULL) 365 if (address != NULL)
357 __free_pages(send_els->req->page, 0); 366 __free_pages(send_els->req->page, 0);
358 if (send_els != NULL) { 367 if (send_els != NULL) {
359 kfree(send_els->timer);
360 kfree(send_els->req); 368 kfree(send_els->req);
361 kfree(send_els->resp); 369 kfree(send_els->resp);
362 kfree(send_els); 370 kfree(send_els);
@@ -382,9 +390,6 @@ zfcp_erp_adisc_handler(unsigned long data)
382 struct zfcp_ls_adisc_acc *adisc; 390 struct zfcp_ls_adisc_acc *adisc;
383 391
384 send_els = (struct zfcp_send_els *) data; 392 send_els = (struct zfcp_send_els *) data;
385
386 del_timer(send_els->timer);
387
388 adapter = send_els->adapter; 393 adapter = send_els->adapter;
389 port = send_els->port; 394 port = send_els->port;
390 d_id = send_els->d_id; 395 d_id = send_els->d_id;
@@ -433,7 +438,6 @@ zfcp_erp_adisc_handler(unsigned long data)
433 out: 438 out:
434 zfcp_port_put(port); 439 zfcp_port_put(port);
435 __free_pages(send_els->req->page, 0); 440 __free_pages(send_els->req->page, 0);
436 kfree(send_els->timer);
437 kfree(send_els->req); 441 kfree(send_els->req);
438 kfree(send_els->resp); 442 kfree(send_els->resp);
439 kfree(send_els); 443 kfree(send_els);
@@ -909,8 +913,6 @@ static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
909 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex"); 913 debug_text_event(adapter->erp_dbf, 2, "a_asyh_ex");
910 debug_event(adapter->erp_dbf, 2, &erp_action->action, 914 debug_event(adapter->erp_dbf, 2, &erp_action->action,
911 sizeof (int)); 915 sizeof (int));
912 if (!(set_mask & ZFCP_STATUS_ERP_TIMEDOUT))
913 del_timer(&erp_action->timer);
914 erp_action->status |= set_mask; 916 erp_action->status |= set_mask;
915 zfcp_erp_action_ready(erp_action); 917 zfcp_erp_action_ready(erp_action);
916 } else { 918 } else {
@@ -957,8 +959,7 @@ zfcp_erp_memwait_handler(unsigned long data)
957 * action gets an appropriate flag and will be processed 959 * action gets an appropriate flag and will be processed
958 * accordingly 960 * accordingly
959 */ 961 */
960static void 962void zfcp_erp_timeout_handler(unsigned long data)
961zfcp_erp_timeout_handler(unsigned long data)
962{ 963{
963 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data; 964 struct zfcp_erp_action *erp_action = (struct zfcp_erp_action *) data;
964 struct zfcp_adapter *adapter = erp_action->adapter; 965 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1934,8 +1935,7 @@ zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *erp_action, int close)
1934 &erp_action->adapter->status); 1935 &erp_action->adapter->status);
1935 1936
1936 failed_openfcp: 1937 failed_openfcp:
1937 zfcp_erp_adapter_strategy_close_qdio(erp_action); 1938 zfcp_close_fsf(erp_action->adapter);
1938 zfcp_erp_adapter_strategy_close_fsf(erp_action);
1939 failed_qdio: 1939 failed_qdio:
1940 out: 1940 out:
1941 return retval; 1941 return retval;
@@ -2040,59 +2040,6 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2040 return retval; 2040 return retval;
2041} 2041}
2042 2042
2043/**
2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2045 */
2046static void
2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2048{
2049 int first_used;
2050 int used_count;
2051 struct zfcp_adapter *adapter = erp_action->adapter;
2052
2053 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status)) {
2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2055 "queues on adapter %s\n",
2056 zfcp_get_busid_by_adapter(adapter));
2057 return;
2058 }
2059
2060 /*
2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2063 */
2064 write_lock_irq(&adapter->request_queue.queue_lock);
2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2066 write_unlock_irq(&adapter->request_queue.queue_lock);
2067
2068 debug_text_event(adapter->erp_dbf, 3, "qdio_down2a");
2069 while (qdio_shutdown(adapter->ccw_device,
2070 QDIO_FLAG_CLEANUP_USING_CLEAR) == -EINPROGRESS)
2071 msleep(1000);
2072 debug_text_event(adapter->erp_dbf, 3, "qdio_down2b");
2073
2074 /*
2075 * First we had to stop QDIO operation.
2076 * Now it is safe to take the following actions.
2077 */
2078
2079 /* Cleanup only necessary when there are unacknowledged buffers */
2080 if (atomic_read(&adapter->request_queue.free_count)
2081 < QDIO_MAX_BUFFERS_PER_Q) {
2082 first_used = (adapter->request_queue.free_index +
2083 atomic_read(&adapter->request_queue.free_count))
2084 % QDIO_MAX_BUFFERS_PER_Q;
2085 used_count = QDIO_MAX_BUFFERS_PER_Q -
2086 atomic_read(&adapter->request_queue.free_count);
2087 zfcp_qdio_zero_sbals(adapter->request_queue.buffer,
2088 first_used, used_count);
2089 }
2090 adapter->response_queue.free_index = 0;
2091 atomic_set(&adapter->response_queue.free_count, 0);
2092 adapter->request_queue.free_index = 0;
2093 atomic_set(&adapter->request_queue.free_count, 0);
2094 adapter->request_queue.distance_from_int = 0;
2095}
2096 2043
2097static int 2044static int
2098zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2045zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
@@ -2127,7 +2074,6 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2127 write_lock_irq(&adapter->erp_lock); 2074 write_lock_irq(&adapter->erp_lock);
2128 zfcp_erp_action_to_running(erp_action); 2075 zfcp_erp_action_to_running(erp_action);
2129 write_unlock_irq(&adapter->erp_lock); 2076 write_unlock_irq(&adapter->erp_lock);
2130 zfcp_erp_timeout_init(erp_action);
2131 if (zfcp_fsf_exchange_config_data(erp_action)) { 2077 if (zfcp_fsf_exchange_config_data(erp_action)) {
2132 retval = ZFCP_ERP_FAILED; 2078 retval = ZFCP_ERP_FAILED;
2133 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf"); 2079 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
@@ -2196,7 +2142,6 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2196 zfcp_erp_action_to_running(erp_action); 2142 zfcp_erp_action_to_running(erp_action);
2197 write_unlock_irq(&adapter->erp_lock); 2143 write_unlock_irq(&adapter->erp_lock);
2198 2144
2199 zfcp_erp_timeout_init(erp_action);
2200 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); 2145 ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL);
2201 if (ret == -EOPNOTSUPP) { 2146 if (ret == -EOPNOTSUPP) {
2202 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp"); 2147 debug_text_event(adapter->erp_dbf, 3, "a_xport_notsupp");
@@ -2248,27 +2193,6 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2248 return retval; 2193 return retval;
2249} 2194}
2250 2195
2251/**
2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2253 */
2254static void
2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2256{
2257 struct zfcp_adapter *adapter = erp_action->adapter;
2258
2259 /*
2260 * wake waiting initiators of requests,
2261 * return SCSI commands (with error status),
2262 * clean up all requests (synchronously)
2263 */
2264 zfcp_fsf_req_dismiss_all(adapter);
2265 /* reset FSF request sequence number */
2266 adapter->fsf_req_seq_no = 0;
2267 /* all ports and units are closed */
2268 zfcp_erp_modify_adapter_status(adapter,
2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2270}
2271
2272/* 2196/*
2273 * function: 2197 * function:
2274 * 2198 *
@@ -2605,7 +2529,6 @@ zfcp_erp_port_forced_strategy_close(struct zfcp_erp_action *erp_action)
2605 struct zfcp_adapter *adapter = erp_action->adapter; 2529 struct zfcp_adapter *adapter = erp_action->adapter;
2606 struct zfcp_port *port = erp_action->port; 2530 struct zfcp_port *port = erp_action->port;
2607 2531
2608 zfcp_erp_timeout_init(erp_action);
2609 retval = zfcp_fsf_close_physical_port(erp_action); 2532 retval = zfcp_fsf_close_physical_port(erp_action);
2610 if (retval == -ENOMEM) { 2533 if (retval == -ENOMEM) {
2611 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem"); 2534 debug_text_event(adapter->erp_dbf, 5, "o_pfstc_nomem");
@@ -2662,7 +2585,6 @@ zfcp_erp_port_strategy_close(struct zfcp_erp_action *erp_action)
2662 struct zfcp_adapter *adapter = erp_action->adapter; 2585 struct zfcp_adapter *adapter = erp_action->adapter;
2663 struct zfcp_port *port = erp_action->port; 2586 struct zfcp_port *port = erp_action->port;
2664 2587
2665 zfcp_erp_timeout_init(erp_action);
2666 retval = zfcp_fsf_close_port(erp_action); 2588 retval = zfcp_fsf_close_port(erp_action);
2667 if (retval == -ENOMEM) { 2589 if (retval == -ENOMEM) {
2668 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem"); 2590 debug_text_event(adapter->erp_dbf, 5, "p_pstc_nomem");
@@ -2700,7 +2622,6 @@ zfcp_erp_port_strategy_open_port(struct zfcp_erp_action *erp_action)
2700 struct zfcp_adapter *adapter = erp_action->adapter; 2622 struct zfcp_adapter *adapter = erp_action->adapter;
2701 struct zfcp_port *port = erp_action->port; 2623 struct zfcp_port *port = erp_action->port;
2702 2624
2703 zfcp_erp_timeout_init(erp_action);
2704 retval = zfcp_fsf_open_port(erp_action); 2625 retval = zfcp_fsf_open_port(erp_action);
2705 if (retval == -ENOMEM) { 2626 if (retval == -ENOMEM) {
2706 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem"); 2627 debug_text_event(adapter->erp_dbf, 5, "p_psto_nomem");
@@ -2738,7 +2659,6 @@ zfcp_erp_port_strategy_open_common_lookup(struct zfcp_erp_action *erp_action)
2738 struct zfcp_adapter *adapter = erp_action->adapter; 2659 struct zfcp_adapter *adapter = erp_action->adapter;
2739 struct zfcp_port *port = erp_action->port; 2660 struct zfcp_port *port = erp_action->port;
2740 2661
2741 zfcp_erp_timeout_init(erp_action);
2742 retval = zfcp_ns_gid_pn_request(erp_action); 2662 retval = zfcp_ns_gid_pn_request(erp_action);
2743 if (retval == -ENOMEM) { 2663 if (retval == -ENOMEM) {
2744 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem"); 2664 debug_text_event(adapter->erp_dbf, 5, "p_pstn_nomem");
@@ -2864,7 +2784,6 @@ zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
2864 struct zfcp_adapter *adapter = erp_action->adapter; 2784 struct zfcp_adapter *adapter = erp_action->adapter;
2865 struct zfcp_unit *unit = erp_action->unit; 2785 struct zfcp_unit *unit = erp_action->unit;
2866 2786
2867 zfcp_erp_timeout_init(erp_action);
2868 retval = zfcp_fsf_close_unit(erp_action); 2787 retval = zfcp_fsf_close_unit(erp_action);
2869 if (retval == -ENOMEM) { 2788 if (retval == -ENOMEM) {
2870 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem"); 2789 debug_text_event(adapter->erp_dbf, 5, "u_ustc_nomem");
@@ -2905,7 +2824,6 @@ zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2905 struct zfcp_adapter *adapter = erp_action->adapter; 2824 struct zfcp_adapter *adapter = erp_action->adapter;
2906 struct zfcp_unit *unit = erp_action->unit; 2825 struct zfcp_unit *unit = erp_action->unit;
2907 2826
2908 zfcp_erp_timeout_init(erp_action);
2909 retval = zfcp_fsf_open_unit(erp_action); 2827 retval = zfcp_fsf_open_unit(erp_action);
2910 if (retval == -ENOMEM) { 2828 if (retval == -ENOMEM) {
2911 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem"); 2829 debug_text_event(adapter->erp_dbf, 5, "u_usto_nomem");
@@ -2930,14 +2848,13 @@ zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
2930 return retval; 2848 return retval;
2931} 2849}
2932 2850
2933static inline void 2851void zfcp_erp_start_timer(struct zfcp_fsf_req *fsf_req)
2934zfcp_erp_timeout_init(struct zfcp_erp_action *erp_action)
2935{ 2852{
2936 init_timer(&erp_action->timer); 2853 BUG_ON(!fsf_req->erp_action);
2937 erp_action->timer.function = zfcp_erp_timeout_handler; 2854 fsf_req->timer.function = zfcp_erp_timeout_handler;
2938 erp_action->timer.data = (unsigned long) erp_action; 2855 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
2939 /* jiffies will be added in zfcp_fsf_req_send */ 2856 fsf_req->timer.expires = jiffies + ZFCP_ERP_FSFREQ_TIMEOUT;
2940 erp_action->timer.expires = ZFCP_ERP_FSFREQ_TIMEOUT; 2857 add_timer(&fsf_req->timer);
2941} 2858}
2942 2859
2943/* 2860/*
@@ -3241,7 +3158,7 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3241} 3158}
3242 3159
3243 3160
3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 3161static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3245{ 3162{
3246 struct zfcp_port *port; 3163 struct zfcp_port *port;
3247 3164
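
With the adapter-wide scsi_er_timer removed and the separately allocated CT/ELS timers dropped, every zfcp_fsf_req now carries its own struct timer_list: zfcp_fsf_start_timer() or zfcp_erp_start_timer() arms it before the request is sent, and zfcp_fsf_req_complete() deletes it again. A stripped-down, kernel-style sketch of that pattern with stand-in names (illustration only, not part of the patch; the 2.6.18-era timer API hands an unsigned long cookie to the callback):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_req {                         /* stand-in for struct zfcp_fsf_req */
        struct timer_list timer;
        void *adapter;
};

static void my_timeout_handler(unsigned long data)
{
        struct my_req *req = (struct my_req *) data;

        /* e.g. trigger adapter recovery for req->adapter */
        (void) req;
}

static void my_start_timer(struct my_req *req, unsigned long timeout)
{
        init_timer(&req->timer);
        req->timer.function = my_timeout_handler;
        req->timer.data = (unsigned long) req;
        req->timer.expires = jiffies + timeout;
        add_timer(&req->timer);
}

static void my_req_complete(struct my_req *req)
{
        del_timer(&req->timer);         /* matches the del_timer() added to
                                         * zfcp_fsf_req_complete() */
        /* ...evaluate status, wake any waiters... */
}
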
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 146d7a2b4c4a..b8794d77285d 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -55,7 +55,6 @@ extern void zfcp_unit_dequeue(struct zfcp_unit *);
55 55
56/******************************* S/390 IO ************************************/ 56/******************************* S/390 IO ************************************/
57extern int zfcp_ccw_register(void); 57extern int zfcp_ccw_register(void);
58extern void zfcp_ccw_unregister(void);
59 58
60extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int); 59extern void zfcp_qdio_zero_sbals(struct qdio_buffer **, int, int);
61extern int zfcp_qdio_allocate(struct zfcp_adapter *); 60extern int zfcp_qdio_allocate(struct zfcp_adapter *);
@@ -88,8 +87,8 @@ extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
88 struct fsf_qtcb_bottom_port *); 87 struct fsf_qtcb_bottom_port *);
89extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, 88extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
90 u32, u32, struct zfcp_sg_list *); 89 u32, u32, struct zfcp_sg_list *);
91extern void zfcp_fsf_request_timeout_handler(unsigned long); 90extern void zfcp_fsf_start_timer(struct zfcp_fsf_req *, unsigned long);
92extern void zfcp_fsf_scsi_er_timeout_handler(unsigned long); 91extern void zfcp_erp_start_timer(struct zfcp_fsf_req *);
93extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 92extern int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
94extern int zfcp_fsf_status_read(struct zfcp_adapter *, int); 93extern int zfcp_fsf_status_read(struct zfcp_adapter *, int);
95extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *, 94extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
@@ -99,8 +98,7 @@ extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
99extern int zfcp_fsf_send_els(struct zfcp_send_els *); 98extern int zfcp_fsf_send_els(struct zfcp_send_els *);
100extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 99extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
101 struct zfcp_unit *, 100 struct zfcp_unit *,
102 struct scsi_cmnd *, 101 struct scsi_cmnd *, int, int);
103 struct timer_list*, int);
104extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *); 102extern int zfcp_fsf_req_complete(struct zfcp_fsf_req *);
105extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *); 103extern void zfcp_fsf_incoming_els(struct zfcp_fsf_req *);
106extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 104extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -124,14 +122,11 @@ extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
124extern void set_host_byte(u32 *, char); 122extern void set_host_byte(u32 *, char);
125extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(u32 *, char);
126extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
127extern void zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *);
128extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
129 126
130extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *, 127extern int zfcp_scsi_command_async(struct zfcp_adapter *,struct zfcp_unit *,
131 struct scsi_cmnd *, struct timer_list *); 128 struct scsi_cmnd *, int);
132extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, 129extern int zfcp_scsi_command_sync(struct zfcp_unit *, struct scsi_cmnd *, int);
133 struct timer_list *);
134extern struct scsi_transport_template *zfcp_transport_template;
135extern struct fc_function_template zfcp_transport_functions; 130extern struct fc_function_template zfcp_transport_functions;
136 131
137/******************************** ERP ****************************************/ 132/******************************** ERP ****************************************/
@@ -139,7 +134,6 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 134extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 135extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 136extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 137
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 138extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 139extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -187,7 +181,7 @@ extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
187 struct zfcp_fsf_req *); 181 struct zfcp_fsf_req *);
188extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, 182extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
189 struct scsi_cmnd *, struct zfcp_fsf_req *, 183 struct scsi_cmnd *, struct zfcp_fsf_req *,
190 struct zfcp_fsf_req *); 184 unsigned long);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 185extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 186 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); 187extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ff2eacf5ec8c..277826cdd0c8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -42,7 +42,7 @@ static inline int zfcp_fsf_req_sbal_check(
42static inline int zfcp_use_one_sbal( 42static inline int zfcp_use_one_sbal(
43 struct scatterlist *, int, struct scatterlist *, int); 43 struct scatterlist *, int, struct scatterlist *, int);
44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int); 44static struct zfcp_fsf_req *zfcp_fsf_req_alloc(mempool_t *, int);
45static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *); 45static int zfcp_fsf_req_send(struct zfcp_fsf_req *);
46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *); 46static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *); 47static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); 48static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
@@ -100,14 +100,19 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
100 if (req_flags & ZFCP_REQ_NO_QTCB) 100 if (req_flags & ZFCP_REQ_NO_QTCB)
101 size = sizeof(struct zfcp_fsf_req); 101 size = sizeof(struct zfcp_fsf_req);
102 else 102 else
103 size = sizeof(struct zfcp_fsf_req_pool_element); 103 size = sizeof(struct zfcp_fsf_req_qtcb);
104 104
105 if (likely(pool != NULL)) 105 if (likely(pool))
106 ptr = mempool_alloc(pool, GFP_ATOMIC); 106 ptr = mempool_alloc(pool, GFP_ATOMIC);
107 else 107 else {
108 ptr = kmalloc(size, GFP_ATOMIC); 108 if (req_flags & ZFCP_REQ_NO_QTCB)
109 ptr = kmalloc(size, GFP_ATOMIC);
110 else
111 ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
112 SLAB_ATOMIC);
113 }
109 114
110 if (unlikely(NULL == ptr)) 115 if (unlikely(!ptr))
111 goto out; 116 goto out;
112 117
113 memset(ptr, 0, size); 118 memset(ptr, 0, size);
@@ -115,9 +120,8 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
115 if (req_flags & ZFCP_REQ_NO_QTCB) { 120 if (req_flags & ZFCP_REQ_NO_QTCB) {
116 fsf_req = (struct zfcp_fsf_req *) ptr; 121 fsf_req = (struct zfcp_fsf_req *) ptr;
117 } else { 122 } else {
118 fsf_req = &((struct zfcp_fsf_req_pool_element *) ptr)->fsf_req; 123 fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req;
119 fsf_req->qtcb = 124 fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb;
120 &((struct zfcp_fsf_req_pool_element *) ptr)->qtcb;
121 } 125 }
122 126
123 fsf_req->pool = pool; 127 fsf_req->pool = pool;
@@ -139,10 +143,17 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
139void 143void
140zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) 144zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
141{ 145{
142 if (likely(fsf_req->pool != NULL)) 146 if (likely(fsf_req->pool)) {
143 mempool_free(fsf_req, fsf_req->pool); 147 mempool_free(fsf_req, fsf_req->pool);
144 else 148 return;
145 kfree(fsf_req); 149 }
150
151 if (fsf_req->qtcb) {
152 kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req);
153 return;
154 }
155
156 kfree(fsf_req);
146} 157}
147 158
148/** 159/**
@@ -214,8 +225,10 @@ zfcp_fsf_req_complete(struct zfcp_fsf_req *fsf_req)
214 */ 225 */
215 zfcp_fsf_status_read_handler(fsf_req); 226 zfcp_fsf_status_read_handler(fsf_req);
216 goto out; 227 goto out;
217 } else 228 } else {
229 del_timer(&fsf_req->timer);
218 zfcp_fsf_protstatus_eval(fsf_req); 230 zfcp_fsf_protstatus_eval(fsf_req);
231 }
219 232
220 /* 233 /*
221 * fsf_req may be deleted due to waking up functions, so 234 * fsf_req may be deleted due to waking up functions, so
@@ -774,8 +787,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
774 sbale->addr = (void *) status_buffer; 787 sbale->addr = (void *) status_buffer;
775 sbale->length = sizeof(struct fsf_status_read_buffer); 788 sbale->length = sizeof(struct fsf_status_read_buffer);
776 789
777 /* start QDIO request for this FSF request */ 790 retval = zfcp_fsf_req_send(fsf_req);
778 retval = zfcp_fsf_req_send(fsf_req, NULL);
779 if (retval) { 791 if (retval) {
780 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status " 792 ZFCP_LOG_DEBUG("error: Could not set-up unsolicited status "
781 "environment.\n"); 793 "environment.\n");
@@ -1101,8 +1113,8 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1101 struct zfcp_unit *unit, int req_flags) 1113 struct zfcp_unit *unit, int req_flags)
1102{ 1114{
1103 volatile struct qdio_buffer_element *sbale; 1115 volatile struct qdio_buffer_element *sbale;
1104 unsigned long lock_flags;
1105 struct zfcp_fsf_req *fsf_req = NULL; 1116 struct zfcp_fsf_req *fsf_req = NULL;
1117 unsigned long lock_flags;
1106 int retval = 0; 1118 int retval = 0;
1107 1119
1108 /* setup new FSF request */ 1120 /* setup new FSF request */
@@ -1132,12 +1144,9 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1132 /* set handle of request which should be aborted */ 1144 /* set handle of request which should be aborted */
1133 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id; 1145 fsf_req->qtcb->bottom.support.req_handle = (u64) old_req_id;
1134 1146
1135 /* start QDIO request for this FSF request */ 1147 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
1136 1148 retval = zfcp_fsf_req_send(fsf_req);
1137 zfcp_fsf_start_scsi_er_timer(adapter);
1138 retval = zfcp_fsf_req_send(fsf_req, NULL);
1139 if (retval) { 1149 if (retval) {
1140 del_timer(&adapter->scsi_er_timer);
1141 ZFCP_LOG_INFO("error: Failed to send abort command request " 1150 ZFCP_LOG_INFO("error: Failed to send abort command request "
1142 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", 1151 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
1143 zfcp_get_busid_by_adapter(adapter), 1152 zfcp_get_busid_by_adapter(adapter),
@@ -1173,8 +1182,6 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1173 unsigned char status_qual = 1182 unsigned char status_qual =
1174 new_fsf_req->qtcb->header.fsf_status_qual.word[0]; 1183 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1175 1184
1176 del_timer(&new_fsf_req->adapter->scsi_er_timer);
1177
1178 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1185 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1179 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */ 1186 /* do not set ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED */
1180 goto skip_fsfstatus; 1187 goto skip_fsfstatus;
@@ -1380,11 +1387,6 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1380 goto failed_req; 1387 goto failed_req;
1381 } 1388 }
1382 1389
1383 if (erp_action != NULL) {
1384 erp_action->fsf_req = fsf_req;
1385 fsf_req->erp_action = erp_action;
1386 }
1387
1388 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1390 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1389 if (zfcp_use_one_sbal(ct->req, ct->req_count, 1391 if (zfcp_use_one_sbal(ct->req, ct->req_count,
1390 ct->resp, ct->resp_count)){ 1392 ct->resp, ct->resp_count)){
@@ -1451,8 +1453,14 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1451 1453
1452 zfcp_san_dbf_event_ct_request(fsf_req); 1454 zfcp_san_dbf_event_ct_request(fsf_req);
1453 1455
1454 /* start QDIO request for this FSF request */ 1456 if (erp_action) {
1455 ret = zfcp_fsf_req_send(fsf_req, ct->timer); 1457 erp_action->fsf_req = fsf_req;
1458 fsf_req->erp_action = erp_action;
1459 zfcp_erp_start_timer(fsf_req);
1460 } else
1461 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1462
1463 ret = zfcp_fsf_req_send(fsf_req);
1456 if (ret) { 1464 if (ret) {
1457 ZFCP_LOG_DEBUG("error: initiation of CT request failed " 1465 ZFCP_LOG_DEBUG("error: initiation of CT request failed "
1458 "(adapter %s, port 0x%016Lx)\n", 1466 "(adapter %s, port 0x%016Lx)\n",
@@ -1749,8 +1757,8 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1749 1757
1750 zfcp_san_dbf_event_els_request(fsf_req); 1758 zfcp_san_dbf_event_els_request(fsf_req);
1751 1759
1752 /* start QDIO request for this FSF request */ 1760 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
1753 ret = zfcp_fsf_req_send(fsf_req, els->timer); 1761 ret = zfcp_fsf_req_send(fsf_req);
1754 if (ret) { 1762 if (ret) {
1755 ZFCP_LOG_DEBUG("error: initiation of ELS request failed " 1763 ZFCP_LOG_DEBUG("error: initiation of ELS request failed "
1756 "(adapter %s, port d_id: 0x%08x)\n", 1764 "(adapter %s, port d_id: 0x%08x)\n",
@@ -1947,6 +1955,7 @@ int
1947zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) 1955zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1948{ 1956{
1949 volatile struct qdio_buffer_element *sbale; 1957 volatile struct qdio_buffer_element *sbale;
1958 struct zfcp_fsf_req *fsf_req;
1950 unsigned long lock_flags; 1959 unsigned long lock_flags;
1951 int retval = 0; 1960 int retval = 0;
1952 1961
@@ -1955,7 +1964,7 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1955 FSF_QTCB_EXCHANGE_CONFIG_DATA, 1964 FSF_QTCB_EXCHANGE_CONFIG_DATA,
1956 ZFCP_REQ_AUTO_CLEANUP, 1965 ZFCP_REQ_AUTO_CLEANUP,
1957 erp_action->adapter->pool.fsf_req_erp, 1966 erp_action->adapter->pool.fsf_req_erp,
1958 &lock_flags, &(erp_action->fsf_req)); 1967 &lock_flags, &fsf_req);
1959 if (retval < 0) { 1968 if (retval < 0) {
1960 ZFCP_LOG_INFO("error: Could not create exchange configuration " 1969 ZFCP_LOG_INFO("error: Could not create exchange configuration "
1961 "data request for adapter %s.\n", 1970 "data request for adapter %s.\n",
@@ -1963,26 +1972,26 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1963 goto out; 1972 goto out;
1964 } 1973 }
1965 1974
1966 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 1975 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1967 erp_action->fsf_req->sbal_curr, 0);
1968 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1976 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1969 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1977 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1970 1978
1971 erp_action->fsf_req->erp_action = erp_action; 1979 fsf_req->qtcb->bottom.config.feature_selection =
1972 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
1973 FSF_FEATURE_CFDC | 1980 FSF_FEATURE_CFDC |
1974 FSF_FEATURE_LUN_SHARING | 1981 FSF_FEATURE_LUN_SHARING |
1975 FSF_FEATURE_NOTIFICATION_LOST | 1982 FSF_FEATURE_NOTIFICATION_LOST |
1976 FSF_FEATURE_UPDATE_ALERT; 1983 FSF_FEATURE_UPDATE_ALERT;
1984 fsf_req->erp_action = erp_action;
1985 erp_action->fsf_req = fsf_req;
1977 1986
1978 /* start QDIO request for this FSF request */ 1987 zfcp_erp_start_timer(fsf_req);
1979 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 1988 retval = zfcp_fsf_req_send(fsf_req);
1980 if (retval) { 1989 if (retval) {
1981 ZFCP_LOG_INFO 1990 ZFCP_LOG_INFO
1982 ("error: Could not send exchange configuration data " 1991 ("error: Could not send exchange configuration data "
1983 "command on the adapter %s\n", 1992 "command on the adapter %s\n",
1984 zfcp_get_busid_by_adapter(erp_action->adapter)); 1993 zfcp_get_busid_by_adapter(erp_action->adapter));
1985 zfcp_fsf_req_free(erp_action->fsf_req); 1994 zfcp_fsf_req_free(fsf_req);
1986 erp_action->fsf_req = NULL; 1995 erp_action->fsf_req = NULL;
1987 goto out; 1996 goto out;
1988 } 1997 }
@@ -2212,10 +2221,9 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2212 struct fsf_qtcb_bottom_port *data) 2221 struct fsf_qtcb_bottom_port *data)
2213{ 2222{
2214 volatile struct qdio_buffer_element *sbale; 2223 volatile struct qdio_buffer_element *sbale;
2215 int retval = 0;
2216 unsigned long lock_flags;
2217 struct zfcp_fsf_req *fsf_req; 2224 struct zfcp_fsf_req *fsf_req;
2218 struct timer_list *timer; 2225 unsigned long lock_flags;
2226 int retval = 0;
2219 2227
2220 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) { 2228 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2221 ZFCP_LOG_INFO("error: exchange port data " 2229 ZFCP_LOG_INFO("error: exchange port data "
@@ -2248,22 +2256,11 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2248 if (erp_action) { 2256 if (erp_action) {
2249 erp_action->fsf_req = fsf_req; 2257 erp_action->fsf_req = fsf_req;
2250 fsf_req->erp_action = erp_action; 2258 fsf_req->erp_action = erp_action;
2251 timer = &erp_action->timer; 2259 zfcp_erp_start_timer(fsf_req);
2252 } else { 2260 } else
2253 timer = kmalloc(sizeof(struct timer_list), GFP_ATOMIC); 2261 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
2254 if (!timer) {
2255 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2256 lock_flags);
2257 zfcp_fsf_req_free(fsf_req);
2258 return -ENOMEM;
2259 }
2260 init_timer(timer);
2261 timer->function = zfcp_fsf_request_timeout_handler;
2262 timer->data = (unsigned long) adapter;
2263 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
2264 }
2265 2262
2266 retval = zfcp_fsf_req_send(fsf_req, timer); 2263 retval = zfcp_fsf_req_send(fsf_req);
2267 if (retval) { 2264 if (retval) {
2268 ZFCP_LOG_INFO("error: Could not send an exchange port data " 2265 ZFCP_LOG_INFO("error: Could not send an exchange port data "
2269 "command on the adapter %s\n", 2266 "command on the adapter %s\n",
@@ -2271,8 +2268,6 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2271 zfcp_fsf_req_free(fsf_req); 2268 zfcp_fsf_req_free(fsf_req);
2272 if (erp_action) 2269 if (erp_action)
2273 erp_action->fsf_req = NULL; 2270 erp_action->fsf_req = NULL;
2274 else
2275 kfree(timer);
2276 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2271 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2277 lock_flags); 2272 lock_flags);
2278 return retval; 2273 return retval;
@@ -2283,9 +2278,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2283 if (!erp_action) { 2278 if (!erp_action) {
2284 wait_event(fsf_req->completion_wq, 2279 wait_event(fsf_req->completion_wq,
2285 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 2280 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
2286 del_timer_sync(timer);
2287 zfcp_fsf_req_free(fsf_req); 2281 zfcp_fsf_req_free(fsf_req);
2288 kfree(timer);
2289 } 2282 }
2290 return retval; 2283 return retval;
2291} 2284}
@@ -2367,6 +2360,7 @@ int
2367zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) 2360zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2368{ 2361{
2369 volatile struct qdio_buffer_element *sbale; 2362 volatile struct qdio_buffer_element *sbale;
2363 struct zfcp_fsf_req *fsf_req;
2370 unsigned long lock_flags; 2364 unsigned long lock_flags;
2371 int retval = 0; 2365 int retval = 0;
2372 2366
@@ -2375,7 +2369,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2375 FSF_QTCB_OPEN_PORT_WITH_DID, 2369 FSF_QTCB_OPEN_PORT_WITH_DID,
2376 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2370 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2377 erp_action->adapter->pool.fsf_req_erp, 2371 erp_action->adapter->pool.fsf_req_erp,
2378 &lock_flags, &(erp_action->fsf_req)); 2372 &lock_flags, &fsf_req);
2379 if (retval < 0) { 2373 if (retval < 0) {
2380 ZFCP_LOG_INFO("error: Could not create open port request " 2374 ZFCP_LOG_INFO("error: Could not create open port request "
2381 "for port 0x%016Lx on adapter %s.\n", 2375 "for port 0x%016Lx on adapter %s.\n",
@@ -2384,24 +2378,24 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2384 goto out; 2378 goto out;
2385 } 2379 }
2386 2380
2387 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2381 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2388 erp_action->fsf_req->sbal_curr, 0);
2389 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2382 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2390 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2383 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2391 2384
2392 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; 2385 fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2393 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); 2386 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2394 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2387 fsf_req->data = (unsigned long) erp_action->port;
2395 erp_action->fsf_req->erp_action = erp_action; 2388 fsf_req->erp_action = erp_action;
2389 erp_action->fsf_req = fsf_req;
2396 2390
2397 /* start QDIO request for this FSF request */ 2391 zfcp_erp_start_timer(fsf_req);
2398 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2392 retval = zfcp_fsf_req_send(fsf_req);
2399 if (retval) { 2393 if (retval) {
2400 ZFCP_LOG_INFO("error: Could not send open port request for " 2394 ZFCP_LOG_INFO("error: Could not send open port request for "
2401 "port 0x%016Lx on adapter %s.\n", 2395 "port 0x%016Lx on adapter %s.\n",
2402 erp_action->port->wwpn, 2396 erp_action->port->wwpn,
2403 zfcp_get_busid_by_adapter(erp_action->adapter)); 2397 zfcp_get_busid_by_adapter(erp_action->adapter));
2404 zfcp_fsf_req_free(erp_action->fsf_req); 2398 zfcp_fsf_req_free(fsf_req);
2405 erp_action->fsf_req = NULL; 2399 erp_action->fsf_req = NULL;
2406 goto out; 2400 goto out;
2407 } 2401 }
@@ -2623,6 +2617,7 @@ int
2623zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) 2617zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2624{ 2618{
2625 volatile struct qdio_buffer_element *sbale; 2619 volatile struct qdio_buffer_element *sbale;
2620 struct zfcp_fsf_req *fsf_req;
2626 unsigned long lock_flags; 2621 unsigned long lock_flags;
2627 int retval = 0; 2622 int retval = 0;
2628 2623
@@ -2631,7 +2626,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2631 FSF_QTCB_CLOSE_PORT, 2626 FSF_QTCB_CLOSE_PORT,
2632 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2627 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2633 erp_action->adapter->pool.fsf_req_erp, 2628 erp_action->adapter->pool.fsf_req_erp,
2634 &lock_flags, &(erp_action->fsf_req)); 2629 &lock_flags, &fsf_req);
2635 if (retval < 0) { 2630 if (retval < 0) {
2636 ZFCP_LOG_INFO("error: Could not create a close port request " 2631 ZFCP_LOG_INFO("error: Could not create a close port request "
2637 "for port 0x%016Lx on adapter %s.\n", 2632 "for port 0x%016Lx on adapter %s.\n",
@@ -2640,25 +2635,25 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2640 goto out; 2635 goto out;
2641 } 2636 }
2642 2637
2643 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2638 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2644 erp_action->fsf_req->sbal_curr, 0);
2645 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2639 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2646 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2640 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2647 2641
2648 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 2642 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2649 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2643 fsf_req->data = (unsigned long) erp_action->port;
2650 erp_action->fsf_req->erp_action = erp_action; 2644 fsf_req->erp_action = erp_action;
2651 erp_action->fsf_req->qtcb->header.port_handle = 2645 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2652 erp_action->port->handle; 2646 fsf_req->erp_action = erp_action;
2653 2647 erp_action->fsf_req = fsf_req;
2654 /* start QDIO request for this FSF request */ 2648
2655 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2649 zfcp_erp_start_timer(fsf_req);
2650 retval = zfcp_fsf_req_send(fsf_req);
2656 if (retval) { 2651 if (retval) {
2657 ZFCP_LOG_INFO("error: Could not send a close port request for " 2652 ZFCP_LOG_INFO("error: Could not send a close port request for "
2658 "port 0x%016Lx on adapter %s.\n", 2653 "port 0x%016Lx on adapter %s.\n",
2659 erp_action->port->wwpn, 2654 erp_action->port->wwpn,
2660 zfcp_get_busid_by_adapter(erp_action->adapter)); 2655 zfcp_get_busid_by_adapter(erp_action->adapter));
2661 zfcp_fsf_req_free(erp_action->fsf_req); 2656 zfcp_fsf_req_free(fsf_req);
2662 erp_action->fsf_req = NULL; 2657 erp_action->fsf_req = NULL;
2663 goto out; 2658 goto out;
2664 } 2659 }
@@ -2755,16 +2750,17 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2755int 2750int
2756zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) 2751zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2757{ 2752{
2758 int retval = 0;
2759 unsigned long lock_flags;
2760 volatile struct qdio_buffer_element *sbale; 2753 volatile struct qdio_buffer_element *sbale;
2754 struct zfcp_fsf_req *fsf_req;
2755 unsigned long lock_flags;
2756 int retval = 0;
2761 2757
2762 /* setup new FSF request */ 2758 /* setup new FSF request */
2763 retval = zfcp_fsf_req_create(erp_action->adapter, 2759 retval = zfcp_fsf_req_create(erp_action->adapter,
2764 FSF_QTCB_CLOSE_PHYSICAL_PORT, 2760 FSF_QTCB_CLOSE_PHYSICAL_PORT,
2765 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2761 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2766 erp_action->adapter->pool.fsf_req_erp, 2762 erp_action->adapter->pool.fsf_req_erp,
2767 &lock_flags, &erp_action->fsf_req); 2763 &lock_flags, &fsf_req);
2768 if (retval < 0) { 2764 if (retval < 0) {
2769 ZFCP_LOG_INFO("error: Could not create close physical port " 2765 ZFCP_LOG_INFO("error: Could not create close physical port "
2770 "request (adapter %s, port 0x%016Lx)\n", 2766 "request (adapter %s, port 0x%016Lx)\n",
@@ -2774,8 +2770,7 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2774 goto out; 2770 goto out;
2775 } 2771 }
2776 2772
2777 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2773 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2778 erp_action->fsf_req->sbal_curr, 0);
2779 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2774 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2780 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2775 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2781 2776
@@ -2783,20 +2778,19 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2783 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 2778 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2784 &erp_action->port->status); 2779 &erp_action->port->status);
2785 /* save a pointer to this port */ 2780 /* save a pointer to this port */
2786 erp_action->fsf_req->data = (unsigned long) erp_action->port; 2781 fsf_req->data = (unsigned long) erp_action->port;
2787 /* port to be closed */ 2782 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2788 erp_action->fsf_req->qtcb->header.port_handle = 2783 fsf_req->erp_action = erp_action;
2789 erp_action->port->handle; 2784 erp_action->fsf_req = fsf_req;
2790 erp_action->fsf_req->erp_action = erp_action; 2785
2791 2786 zfcp_erp_start_timer(fsf_req);
2792 /* start QDIO request for this FSF request */ 2787 retval = zfcp_fsf_req_send(fsf_req);
2793 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
2794 if (retval) { 2788 if (retval) {
2795 ZFCP_LOG_INFO("error: Could not send close physical port " 2789 ZFCP_LOG_INFO("error: Could not send close physical port "
2796 "request (adapter %s, port 0x%016Lx)\n", 2790 "request (adapter %s, port 0x%016Lx)\n",
2797 zfcp_get_busid_by_adapter(erp_action->adapter), 2791 zfcp_get_busid_by_adapter(erp_action->adapter),
2798 erp_action->port->wwpn); 2792 erp_action->port->wwpn);
2799 zfcp_fsf_req_free(erp_action->fsf_req); 2793 zfcp_fsf_req_free(fsf_req);
2800 erp_action->fsf_req = NULL; 2794 erp_action->fsf_req = NULL;
2801 goto out; 2795 goto out;
2802 } 2796 }
@@ -2961,6 +2955,7 @@ int
2961zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) 2955zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2962{ 2956{
2963 volatile struct qdio_buffer_element *sbale; 2957 volatile struct qdio_buffer_element *sbale;
2958 struct zfcp_fsf_req *fsf_req;
2964 unsigned long lock_flags; 2959 unsigned long lock_flags;
2965 int retval = 0; 2960 int retval = 0;
2966 2961
@@ -2969,7 +2964,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2969 FSF_QTCB_OPEN_LUN, 2964 FSF_QTCB_OPEN_LUN,
2970 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 2965 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
2971 erp_action->adapter->pool.fsf_req_erp, 2966 erp_action->adapter->pool.fsf_req_erp,
2972 &lock_flags, &(erp_action->fsf_req)); 2967 &lock_flags, &fsf_req);
2973 if (retval < 0) { 2968 if (retval < 0) {
2974 ZFCP_LOG_INFO("error: Could not create open unit request for " 2969 ZFCP_LOG_INFO("error: Could not create open unit request for "
2975 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 2970 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
@@ -2979,24 +2974,22 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2979 goto out; 2974 goto out;
2980 } 2975 }
2981 2976
2982 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 2977 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2983 erp_action->fsf_req->sbal_curr, 0);
2984 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2978 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2985 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2979 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2986 2980
2987 erp_action->fsf_req->qtcb->header.port_handle = 2981 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
2988 erp_action->port->handle; 2982 fsf_req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
2989 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
2990 erp_action->unit->fcp_lun;
2991 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE)) 2983 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2992 erp_action->fsf_req->qtcb->bottom.support.option = 2984 fsf_req->qtcb->bottom.support.option =
2993 FSF_OPEN_LUN_SUPPRESS_BOXING; 2985 FSF_OPEN_LUN_SUPPRESS_BOXING;
2994 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 2986 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
2995 erp_action->fsf_req->data = (unsigned long) erp_action->unit; 2987 fsf_req->data = (unsigned long) erp_action->unit;
2996 erp_action->fsf_req->erp_action = erp_action; 2988 fsf_req->erp_action = erp_action;
2989 erp_action->fsf_req = fsf_req;
2997 2990
2998 /* start QDIO request for this FSF request */ 2991 zfcp_erp_start_timer(fsf_req);
2999 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 2992 retval = zfcp_fsf_req_send(erp_action->fsf_req);
3000 if (retval) { 2993 if (retval) {
3001 ZFCP_LOG_INFO("error: Could not send an open unit request " 2994 ZFCP_LOG_INFO("error: Could not send an open unit request "
3002 "on the adapter %s, port 0x%016Lx for " 2995 "on the adapter %s, port 0x%016Lx for "
@@ -3004,7 +2997,7 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
3004 zfcp_get_busid_by_adapter(erp_action->adapter), 2997 zfcp_get_busid_by_adapter(erp_action->adapter),
3005 erp_action->port->wwpn, 2998 erp_action->port->wwpn,
3006 erp_action->unit->fcp_lun); 2999 erp_action->unit->fcp_lun);
3007 zfcp_fsf_req_free(erp_action->fsf_req); 3000 zfcp_fsf_req_free(fsf_req);
3008 erp_action->fsf_req = NULL; 3001 erp_action->fsf_req = NULL;
3009 goto out; 3002 goto out;
3010 } 3003 }
@@ -3297,6 +3290,7 @@ int
3297zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) 3290zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3298{ 3291{
3299 volatile struct qdio_buffer_element *sbale; 3292 volatile struct qdio_buffer_element *sbale;
3293 struct zfcp_fsf_req *fsf_req;
3300 unsigned long lock_flags; 3294 unsigned long lock_flags;
3301 int retval = 0; 3295 int retval = 0;
3302 3296
@@ -3305,7 +3299,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3305 FSF_QTCB_CLOSE_LUN, 3299 FSF_QTCB_CLOSE_LUN,
3306 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP, 3300 ZFCP_WAIT_FOR_SBAL | ZFCP_REQ_AUTO_CLEANUP,
3307 erp_action->adapter->pool.fsf_req_erp, 3301 erp_action->adapter->pool.fsf_req_erp,
3308 &lock_flags, &(erp_action->fsf_req)); 3302 &lock_flags, &fsf_req);
3309 if (retval < 0) { 3303 if (retval < 0) {
3310 ZFCP_LOG_INFO("error: Could not create close unit request for " 3304 ZFCP_LOG_INFO("error: Could not create close unit request for "
3311 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n", 3305 "unit 0x%016Lx on port 0x%016Lx on adapter %s.\n",
@@ -3315,27 +3309,26 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3315 goto out; 3309 goto out;
3316 } 3310 }
3317 3311
3318 sbale = zfcp_qdio_sbale_req(erp_action->fsf_req, 3312 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
3319 erp_action->fsf_req->sbal_curr, 0);
3320 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 3313 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
3321 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 3314 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
3322 3315
3323 erp_action->fsf_req->qtcb->header.port_handle = 3316 fsf_req->qtcb->header.port_handle = erp_action->port->handle;
3324 erp_action->port->handle; 3317 fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3325 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3326 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 3318 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3327 erp_action->fsf_req->data = (unsigned long) erp_action->unit; 3319 fsf_req->data = (unsigned long) erp_action->unit;
3328 erp_action->fsf_req->erp_action = erp_action; 3320 fsf_req->erp_action = erp_action;
3321 erp_action->fsf_req = fsf_req;
3329 3322
3330 /* start QDIO request for this FSF request */ 3323 zfcp_erp_start_timer(fsf_req);
3331 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 3324 retval = zfcp_fsf_req_send(erp_action->fsf_req);
3332 if (retval) { 3325 if (retval) {
3333 ZFCP_LOG_INFO("error: Could not send a close unit request for " 3326 ZFCP_LOG_INFO("error: Could not send a close unit request for "
3334 "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n", 3327 "unit 0x%016Lx on port 0x%016Lx onadapter %s.\n",
3335 erp_action->unit->fcp_lun, 3328 erp_action->unit->fcp_lun,
3336 erp_action->port->wwpn, 3329 erp_action->port->wwpn,
3337 zfcp_get_busid_by_adapter(erp_action->adapter)); 3330 zfcp_get_busid_by_adapter(erp_action->adapter));
3338 zfcp_fsf_req_free(erp_action->fsf_req); 3331 zfcp_fsf_req_free(fsf_req);
3339 erp_action->fsf_req = NULL; 3332 erp_action->fsf_req = NULL;
3340 goto out; 3333 goto out;
3341 } 3334 }
@@ -3488,7 +3481,7 @@ int
3488zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter, 3481zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3489 struct zfcp_unit *unit, 3482 struct zfcp_unit *unit,
3490 struct scsi_cmnd * scsi_cmnd, 3483 struct scsi_cmnd * scsi_cmnd,
3491 struct timer_list *timer, int req_flags) 3484 int use_timer, int req_flags)
3492{ 3485{
3493 struct zfcp_fsf_req *fsf_req = NULL; 3486 struct zfcp_fsf_req *fsf_req = NULL;
3494 struct fcp_cmnd_iu *fcp_cmnd_iu; 3487 struct fcp_cmnd_iu *fcp_cmnd_iu;
@@ -3516,7 +3509,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3516 fsf_req->unit = unit; 3509 fsf_req->unit = unit;
3517 3510
3518 /* associate FSF request with SCSI request (for look up on abort) */ 3511 /* associate FSF request with SCSI request (for look up on abort) */
3519 scsi_cmnd->host_scribble = (char *) fsf_req; 3512 scsi_cmnd->host_scribble = (unsigned char *) fsf_req->req_id;
3520 3513
3521 /* associate SCSI command with FSF request */ 3514 /* associate SCSI command with FSF request */
3522 fsf_req->data = (unsigned long) scsi_cmnd; 3515 fsf_req->data = (unsigned long) scsi_cmnd;
@@ -3629,11 +3622,10 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3629 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 3622 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
3630 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len); 3623 (char *) scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
3631 3624
3632 /* 3625 if (use_timer)
3633 * start QDIO request for this FSF request 3626 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
3634 * covered by an SBALE) 3627
3635 */ 3628 retval = zfcp_fsf_req_send(fsf_req);
3636 retval = zfcp_fsf_req_send(fsf_req, timer);
3637 if (unlikely(retval < 0)) { 3629 if (unlikely(retval < 0)) {
3638 ZFCP_LOG_INFO("error: Could not send FCP command request " 3630 ZFCP_LOG_INFO("error: Could not send FCP command request "
3639 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n", 3631 "on adapter %s, port 0x%016Lx, unit 0x%016Lx\n",
@@ -3718,11 +3710,9 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3718 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 3710 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
3719 fcp_cmnd_iu->task_management_flags = tm_flags; 3711 fcp_cmnd_iu->task_management_flags = tm_flags;
3720 3712
3721 /* start QDIO request for this FSF request */ 3713 zfcp_fsf_start_timer(fsf_req, ZFCP_SCSI_ER_TIMEOUT);
3722 zfcp_fsf_start_scsi_er_timer(adapter); 3714 retval = zfcp_fsf_req_send(fsf_req);
3723 retval = zfcp_fsf_req_send(fsf_req, NULL);
3724 if (retval) { 3715 if (retval) {
3725 del_timer(&adapter->scsi_er_timer);
3726 ZFCP_LOG_INFO("error: Could not send an FCP-command (task " 3716 ZFCP_LOG_INFO("error: Could not send an FCP-command (task "
3727 "management) on adapter %s, port 0x%016Lx for " 3717 "management) on adapter %s, port 0x%016Lx for "
3728 "unit LUN 0x%016Lx\n", 3718 "unit LUN 0x%016Lx\n",
@@ -4226,7 +4216,6 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4226 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 4216 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4227 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data; 4217 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4228 4218
4229 del_timer(&fsf_req->adapter->scsi_er_timer);
4230 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 4219 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
4231 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 4220 fsf_req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
4232 goto skip_fsfstatus; 4221 goto skip_fsfstatus;
@@ -4295,7 +4284,6 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4295 struct zfcp_fsf_req *fsf_req; 4284 struct zfcp_fsf_req *fsf_req;
4296 struct fsf_qtcb_bottom_support *bottom; 4285 struct fsf_qtcb_bottom_support *bottom;
4297 volatile struct qdio_buffer_element *sbale; 4286 volatile struct qdio_buffer_element *sbale;
4298 struct timer_list *timer;
4299 unsigned long lock_flags; 4287 unsigned long lock_flags;
4300 int req_flags = 0; 4288 int req_flags = 0;
4301 int direction; 4289 int direction;
@@ -4327,12 +4315,6 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4327 goto out; 4315 goto out;
4328 } 4316 }
4329 4317
4330 timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
4331 if (!timer) {
4332 retval = -ENOMEM;
4333 goto out;
4334 }
4335
4336 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags, 4318 retval = zfcp_fsf_req_create(adapter, fsf_command, req_flags,
4337 NULL, &lock_flags, &fsf_req); 4319 NULL, &lock_flags, &fsf_req);
4338 if (retval < 0) { 4320 if (retval < 0) {
@@ -4367,12 +4349,8 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4367 } else 4349 } else
4368 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 4350 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
4369 4351
4370 init_timer(timer); 4352 zfcp_fsf_start_timer(fsf_req, ZFCP_FSF_REQUEST_TIMEOUT);
4371 timer->function = zfcp_fsf_request_timeout_handler; 4353 retval = zfcp_fsf_req_send(fsf_req);
4372 timer->data = (unsigned long) adapter;
4373 timer->expires = ZFCP_FSF_REQUEST_TIMEOUT;
4374
4375 retval = zfcp_fsf_req_send(fsf_req, timer);
4376 if (retval < 0) { 4354 if (retval < 0) {
4377 ZFCP_LOG_INFO("initiation of cfdc up/download failed" 4355 ZFCP_LOG_INFO("initiation of cfdc up/download failed"
4378 "(adapter %s)\n", 4356 "(adapter %s)\n",
@@ -4392,15 +4370,12 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4392 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 4370 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4393 4371
4394 *fsf_req_ptr = fsf_req; 4372 *fsf_req_ptr = fsf_req;
4395 del_timer_sync(timer); 4373 goto out;
4396 goto free_timer;
4397 4374
4398 free_fsf_req: 4375 free_fsf_req:
4399 zfcp_fsf_req_free(fsf_req); 4376 zfcp_fsf_req_free(fsf_req);
4400 unlock_queue_lock: 4377 unlock_queue_lock:
4401 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 4378 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
4402 free_timer:
4403 kfree(timer);
4404 out: 4379 out:
4405 return retval; 4380 return retval;
4406} 4381}
@@ -4656,7 +4631,6 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4656{ 4631{
4657 volatile struct qdio_buffer_element *sbale; 4632 volatile struct qdio_buffer_element *sbale;
4658 struct zfcp_fsf_req *fsf_req = NULL; 4633 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4660 int ret = 0; 4634 int ret = 0;
4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4635 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4662 4636
@@ -4673,12 +4647,13 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4673 fsf_req->fsf_command = fsf_cmd; 4647 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list); 4648 INIT_LIST_HEAD(&fsf_req->list);
4675 4649
4676 /* unique request id */ 4650 /* this is serialized (we are holding req_queue-lock of adapter */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags); 4651 if (adapter->req_no == 0)
4652 adapter->req_no++;
4678 fsf_req->req_id = adapter->req_no++; 4653 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4680 4654
4681 zfcp_fsf_req_qtcb_init(fsf_req); 4655 init_timer(&fsf_req->timer);
4656 zfcp_fsf_req_qtcb_init(fsf_req);
4682 4657
4683 /* initialize waitqueue which may be used to wait on 4658 /* initialize waitqueue which may be used to wait on
4684 this request completion */ 4659 this request completion */
@@ -4748,8 +4723,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4748 * returns: 0 - request transfer succesfully started 4723 * returns: 0 - request transfer succesfully started
4749 * !0 - start of request transfer failed 4724 * !0 - start of request transfer failed
4750 */ 4725 */
4751static int 4726static int zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req)
4752zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4753{ 4727{
4754 struct zfcp_adapter *adapter; 4728 struct zfcp_adapter *adapter;
4755 struct zfcp_qdio_queue *req_queue; 4729 struct zfcp_qdio_queue *req_queue;
@@ -4777,12 +4751,6 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4777 4751
4778 inc_seq_no = (fsf_req->qtcb != NULL); 4752 inc_seq_no = (fsf_req->qtcb != NULL);
4779 4753
4780 /* figure out expiration time of timeout and start timeout */
4781 if (unlikely(timer)) {
4782 timer->expires += jiffies;
4783 add_timer(timer);
4784 }
4785
4786 ZFCP_LOG_TRACE("request queue of adapter %s: " 4754 ZFCP_LOG_TRACE("request queue of adapter %s: "
4787 "next free SBAL is %i, %i free SBALs\n", 4755 "next free SBAL is %i, %i free SBALs\n",
4788 zfcp_get_busid_by_adapter(adapter), 4756 zfcp_get_busid_by_adapter(adapter),
@@ -4819,12 +4787,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4819 if (unlikely(retval)) { 4787 if (unlikely(retval)) {
4820 /* Queues are down..... */ 4788 /* Queues are down..... */
4821 retval = -EIO; 4789 retval = -EIO;
4822 /* 4790 del_timer(&fsf_req->timer);
4823 * FIXME(potential race):
4824 * timer might be expired (absolutely unlikely)
4825 */
4826 if (timer)
4827 del_timer(timer);
4828 spin_lock(&adapter->req_list_lock); 4791 spin_lock(&adapter->req_list_lock);
4829 zfcp_reqlist_remove(adapter, fsf_req->req_id); 4792 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4830 spin_unlock(&adapter->req_list_lock); 4793 spin_unlock(&adapter->req_list_lock);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 1bb55086db9f..7cafa34e4c7f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -39,11 +39,10 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
39 39
40static struct device_attribute *zfcp_sysfs_sdev_attrs[]; 40static struct device_attribute *zfcp_sysfs_sdev_attrs[];
41 41
42struct scsi_transport_template *zfcp_transport_template;
43
44struct zfcp_data zfcp_data = { 42struct zfcp_data zfcp_data = {
45 .scsi_host_template = { 43 .scsi_host_template = {
46 .name = ZFCP_NAME, 44 .name = ZFCP_NAME,
45 .module = THIS_MODULE,
47 .proc_name = "zfcp", 46 .proc_name = "zfcp",
48 .slave_alloc = zfcp_scsi_slave_alloc, 47 .slave_alloc = zfcp_scsi_slave_alloc,
49 .slave_configure = zfcp_scsi_slave_configure, 48 .slave_configure = zfcp_scsi_slave_configure,
@@ -232,7 +231,7 @@ zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
232 */ 231 */
233int 232int
234zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit, 233zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
235 struct scsi_cmnd *scpnt, struct timer_list *timer) 234 struct scsi_cmnd *scpnt, int use_timer)
236{ 235{
237 int tmp; 236 int tmp;
238 int retval; 237 int retval;
@@ -268,7 +267,7 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
268 goto out; 267 goto out;
269 } 268 }
270 269
271 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, timer, 270 tmp = zfcp_fsf_send_fcp_command_task(adapter, unit, scpnt, use_timer,
272 ZFCP_REQ_AUTO_CLEANUP); 271 ZFCP_REQ_AUTO_CLEANUP);
273 272
274 if (unlikely(tmp < 0)) { 273 if (unlikely(tmp < 0)) {
@@ -292,21 +291,22 @@ zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
292 * zfcp_scsi_command_sync - send a SCSI command and wait for completion 291 * zfcp_scsi_command_sync - send a SCSI command and wait for completion
293 * @unit: unit where command is sent to 292 * @unit: unit where command is sent to
294 * @scpnt: scsi command to be sent 293 * @scpnt: scsi command to be sent
295 * @timer: timer to be started if request is successfully initiated 294 * @use_timer: indicates whether timer should be setup or not
296 * Return: 0 295 * Return: 0
297 * 296 *
298 * Errors are indicated in scpnt->result 297 * Errors are indicated in scpnt->result
299 */ 298 */
300int 299int
301zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, 300zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
302 struct timer_list *timer) 301 int use_timer)
303{ 302{
304 int ret; 303 int ret;
305 DECLARE_COMPLETION(wait); 304 DECLARE_COMPLETION(wait);
306 305
307 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */ 306 scpnt->SCp.ptr = (void *) &wait; /* silent re-use */
308 scpnt->scsi_done = zfcp_scsi_command_sync_handler; 307 scpnt->scsi_done = zfcp_scsi_command_sync_handler;
309 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt, timer); 308 ret = zfcp_scsi_command_async(unit->port->adapter, unit, scpnt,
309 use_timer);
310 if (ret == 0) 310 if (ret == 0)
311 wait_for_completion(&wait); 311 wait_for_completion(&wait);
312 312
@@ -342,7 +342,7 @@ zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
342 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 342 adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
343 unit = (struct zfcp_unit *) scpnt->device->hostdata; 343 unit = (struct zfcp_unit *) scpnt->device->hostdata;
344 344
345 return zfcp_scsi_command_async(adapter, unit, scpnt, NULL); 345 return zfcp_scsi_command_async(adapter, unit, scpnt, 0);
346} 346}
347 347
348static struct zfcp_unit * 348static struct zfcp_unit *
@@ -379,16 +379,15 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
379 * will handle late commands. (Usually, the normal completion of late 379 * will handle late commands. (Usually, the normal completion of late
380 * commands is ignored with respect to the running abort operation.) 380 * commands is ignored with respect to the running abort operation.)
381 */ 381 */
382int 382int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
383zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 383{
385 struct Scsi_Host *scsi_host; 384 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 385 struct zfcp_adapter *adapter;
387 struct zfcp_unit *unit; 386 struct zfcp_unit *unit;
388 int retval = SUCCESS; 387 struct zfcp_fsf_req *fsf_req;
389 struct zfcp_fsf_req *new_fsf_req = NULL;
390 struct zfcp_fsf_req *old_fsf_req;
391 unsigned long flags; 388 unsigned long flags;
389 unsigned long old_req_id;
390 int retval = SUCCESS;
392 391
393 scsi_host = scpnt->device->host; 392 scsi_host = scpnt->device->host;
394 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; 393 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
@@ -400,55 +399,47 @@ zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
400 /* avoid race condition between late normal completion and abort */ 399 /* avoid race condition between late normal completion and abort */
401 write_lock_irqsave(&adapter->abort_lock, flags); 400 write_lock_irqsave(&adapter->abort_lock, flags);
402 401
403 /* 402 /* Check whether corresponding fsf_req is still pending */
404 * Check whether command has just completed and can not be aborted. 403 spin_lock(&adapter->req_list_lock);
405 * Even if the command has just been completed late, we can access 404 fsf_req = zfcp_reqlist_ismember(adapter, (unsigned long)
406 * scpnt since the SCSI stack does not release it at least until 405 scpnt->host_scribble);
407 * this routine returns. (scpnt is parameter passed to this routine 406 spin_unlock(&adapter->req_list_lock);
408 * and must not disappear during abort even on late completion.) 407 if (!fsf_req) {
409 */
410 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
411 if (!old_fsf_req) {
412 write_unlock_irqrestore(&adapter->abort_lock, flags); 408 write_unlock_irqrestore(&adapter->abort_lock, flags);
413 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, NULL); 409 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, 0);
414 retval = SUCCESS; 410 retval = SUCCESS;
415 goto out; 411 goto out;
416 } 412 }
417 old_fsf_req->data = 0; 413 fsf_req->data = 0;
418 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 414 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
415 old_req_id = fsf_req->req_id;
419 416
420 /* don't access old_fsf_req after releasing the abort_lock */ 417 /* don't access old fsf_req after releasing the abort_lock */
421 write_unlock_irqrestore(&adapter->abort_lock, flags); 418 write_unlock_irqrestore(&adapter->abort_lock, flags);
422 /* call FSF routine which does the abort */ 419
423 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req, 420 fsf_req = zfcp_fsf_abort_fcp_command(old_req_id, adapter, unit, 0);
424 adapter, unit, 0); 421 if (!fsf_req) {
425 if (!new_fsf_req) {
426 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n"); 422 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
427 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, 423 zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
428 old_fsf_req); 424 old_req_id);
429 retval = FAILED; 425 retval = FAILED;
430 goto out; 426 goto out;
431 } 427 }
432 428
433 /* wait for completion of abort */ 429 __wait_event(fsf_req->completion_wq,
434 __wait_event(new_fsf_req->completion_wq, 430 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
435 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
436 431
437 /* status should be valid since signals were not permitted */ 432 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
438 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 433 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, fsf_req, 0);
439 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req,
440 NULL);
441 retval = SUCCESS; 434 retval = SUCCESS;
442 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 435 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
443 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req, 436 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, fsf_req, 0);
444 NULL);
445 retval = SUCCESS; 437 retval = SUCCESS;
446 } else { 438 } else {
447 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req, 439 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, fsf_req, 0);
448 NULL);
449 retval = FAILED; 440 retval = FAILED;
450 } 441 }
451 zfcp_fsf_req_free(new_fsf_req); 442 zfcp_fsf_req_free(fsf_req);
452 out: 443 out:
453 return retval; 444 return retval;
454} 445}
@@ -548,14 +539,11 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
548 539
549/** 540/**
550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 541 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 542 */
554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 543int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555{ 544{
556 struct zfcp_unit *unit; 545 struct zfcp_unit *unit;
557 struct zfcp_adapter *adapter; 546 struct zfcp_adapter *adapter;
558 unsigned long flags;
559 547
560 unit = (struct zfcp_unit*) scpnt->device->hostdata; 548 unit = (struct zfcp_unit*) scpnt->device->hostdata;
561 adapter = unit->port->adapter; 549 adapter = unit->port->adapter;
@@ -563,22 +551,8 @@ int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
563 ZFCP_LOG_NORMAL("host/bus reset because of problems with " 551 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
564 "unit 0x%016Lx\n", unit->fcp_lun); 552 "unit 0x%016Lx\n", unit->fcp_lun);
565 553
566 write_lock_irqsave(&adapter->erp_lock, flags); 554 zfcp_erp_adapter_reopen(adapter, 0);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 555 zfcp_erp_wait(adapter);
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
582 556
583 return SUCCESS; 557 return SUCCESS;
584} 558}
@@ -607,7 +581,7 @@ zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
607 adapter->scsi_host->max_channel = 0; 581 adapter->scsi_host->max_channel = 0;
608 adapter->scsi_host->unique_id = unique_id++; /* FIXME */ 582 adapter->scsi_host->unique_id = unique_id++; /* FIXME */
609 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH; 583 adapter->scsi_host->max_cmd_len = ZFCP_MAX_SCSI_CMND_LENGTH;
610 adapter->scsi_host->transportt = zfcp_transport_template; 584 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
611 585
612 /* 586 /*
613 * save a pointer to our own adapter data structure within 587 * save a pointer to our own adapter data structure within
@@ -648,16 +622,6 @@ zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
648 return; 622 return;
649} 623}
650 624
651
652void
653zfcp_fsf_start_scsi_er_timer(struct zfcp_adapter *adapter)
654{
655 adapter->scsi_er_timer.function = zfcp_fsf_scsi_er_timeout_handler;
656 adapter->scsi_er_timer.data = (unsigned long) adapter;
657 adapter->scsi_er_timer.expires = jiffies + ZFCP_SCSI_ER_TIMEOUT;
658 add_timer(&adapter->scsi_er_timer);
659}
660
661/* 625/*
662 * Support functions for FC transport class 626 * Support functions for FC transport class
663 */ 627 */
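In the abort handler above, scpnt->host_scribble now carries the request id instead of a raw zfcp_fsf_req pointer, and the id is revalidated under adapter->req_list_lock with zfcp_reqlist_ismember() before the request is touched, so a command that completed (and whose request was freed) in the meantime can no longer be dereferenced. The lookup helper is not shown in this diff; a sketch of the assumed shape, with the hash layout and REQUEST_LIST_SIZE as placeholders:

/* assumed shape of the request-list lookup used by the abort handler:
 * hash the request id into the adapter's list buckets and return the
 * request only if it is still outstanding */
static inline struct zfcp_fsf_req *
zfcp_reqlist_ismember(struct zfcp_adapter *adapter, unsigned long req_id)
{
	struct zfcp_fsf_req *request;
	unsigned int idx = req_id % REQUEST_LIST_SIZE;	/* bucket count assumed */

	list_for_each_entry(request, &adapter->req_list[idx], list)
		if (request->req_id == req_id)
			return request;
	return NULL;
}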
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index d1c1e75bfd60..1e788e815ce7 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -11,19 +11,18 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/ebcdic.h> 12#include <asm/ebcdic.h>
13 13
14struct sysinfo_1_1_1 14struct sysinfo_1_1_1 {
15{
16 char reserved_0[32]; 15 char reserved_0[32];
17 char manufacturer[16]; 16 char manufacturer[16];
18 char type[4]; 17 char type[4];
19 char reserved_1[12]; 18 char reserved_1[12];
20 char model[16]; 19 char model_capacity[16];
21 char sequence[16]; 20 char sequence[16];
22 char plant[4]; 21 char plant[4];
22 char model[16];
23}; 23};
24 24
25struct sysinfo_1_2_1 25struct sysinfo_1_2_1 {
26{
27 char reserved_0[80]; 26 char reserved_0[80];
28 char sequence[16]; 27 char sequence[16];
29 char plant[4]; 28 char plant[4];
@@ -31,9 +30,12 @@ struct sysinfo_1_2_1
31 unsigned short cpu_address; 30 unsigned short cpu_address;
32}; 31};
33 32
34struct sysinfo_1_2_2 33struct sysinfo_1_2_2 {
35{ 34 char format;
36 char reserved_0[32]; 35 char reserved_0[1];
36 unsigned short acc_offset;
37 char reserved_1[24];
38 unsigned int secondary_capability;
37 unsigned int capability; 39 unsigned int capability;
38 unsigned short cpus_total; 40 unsigned short cpus_total;
39 unsigned short cpus_configured; 41 unsigned short cpus_configured;
@@ -42,8 +44,12 @@ struct sysinfo_1_2_2
42 unsigned short adjustment[0]; 44 unsigned short adjustment[0];
43}; 45};
44 46
45struct sysinfo_2_2_1 47struct sysinfo_1_2_2_extension {
46{ 48 unsigned int alt_capability;
49 unsigned short alt_adjustment[0];
50};
51
52struct sysinfo_2_2_1 {
47 char reserved_0[80]; 53 char reserved_0[80];
48 char sequence[16]; 54 char sequence[16];
49 char plant[4]; 55 char plant[4];
@@ -51,15 +57,11 @@ struct sysinfo_2_2_1
51 unsigned short cpu_address; 57 unsigned short cpu_address;
52}; 58};
53 59
54struct sysinfo_2_2_2 60struct sysinfo_2_2_2 {
55{
56 char reserved_0[32]; 61 char reserved_0[32];
57 unsigned short lpar_number; 62 unsigned short lpar_number;
58 char reserved_1; 63 char reserved_1;
59 unsigned char characteristics; 64 unsigned char characteristics;
60 #define LPAR_CHAR_DEDICATED (1 << 7)
61 #define LPAR_CHAR_SHARED (1 << 6)
62 #define LPAR_CHAR_LIMITED (1 << 5)
63 unsigned short cpus_total; 65 unsigned short cpus_total;
64 unsigned short cpus_configured; 66 unsigned short cpus_configured;
65 unsigned short cpus_standby; 67 unsigned short cpus_standby;
@@ -71,12 +73,14 @@ struct sysinfo_2_2_2
71 unsigned short cpus_shared; 73 unsigned short cpus_shared;
72}; 74};
73 75
74struct sysinfo_3_2_2 76#define LPAR_CHAR_DEDICATED (1 << 7)
75{ 77#define LPAR_CHAR_SHARED (1 << 6)
78#define LPAR_CHAR_LIMITED (1 << 5)
79
80struct sysinfo_3_2_2 {
76 char reserved_0[31]; 81 char reserved_0[31];
77 unsigned char count; 82 unsigned char count;
78 struct 83 struct {
79 {
80 char reserved_0[4]; 84 char reserved_0[4];
81 unsigned short cpus_total; 85 unsigned short cpus_total;
82 unsigned short cpus_configured; 86 unsigned short cpus_configured;
@@ -90,136 +94,223 @@ struct sysinfo_3_2_2
90 } vm[8]; 94 } vm[8];
91}; 95};
92 96
93union s390_sysinfo 97static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
94{ 98{
95 struct sysinfo_1_1_1 sysinfo_1_1_1; 99 register int r0 asm("0") = (fc << 28) | sel1;
96 struct sysinfo_1_2_1 sysinfo_1_2_1; 100 register int r1 asm("1") = sel2;
97 struct sysinfo_1_2_2 sysinfo_1_2_2; 101
98 struct sysinfo_2_2_1 sysinfo_2_2_1; 102 asm volatile(
99 struct sysinfo_2_2_2 sysinfo_2_2_2; 103 " stsi 0(%2)\n"
100 struct sysinfo_3_2_2 sysinfo_3_2_2; 104 "0: jz 2f\n"
101}; 105 "1: lhi %0,%3\n"
102 106 "2:\n"
103static inline int stsi (void *sysinfo, 107 EX_TABLE(0b,1b)
104 int fc, int sel1, int sel2) 108 : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
105{ 109 : "cc", "memory" );
106 int cc, retv; 110 return r0;
107
108#ifndef CONFIG_64BIT
109 __asm__ __volatile__ ( "lr\t0,%2\n"
110 "\tlr\t1,%3\n"
111 "\tstsi\t0(%4)\n"
112 "0:\tipm\t%0\n"
113 "\tsrl\t%0,28\n"
114 "1:lr\t%1,0\n"
115 ".section .fixup,\"ax\"\n"
116 "2:\tlhi\t%0,3\n"
117 "\tbras\t1,3f\n"
118 "\t.long 1b\n"
119 "3:\tl\t1,0(1)\n"
120 "\tbr\t1\n"
121 ".previous\n"
122 ".section __ex_table,\"a\"\n"
123 "\t.align 4\n"
124 "\t.long 0b,2b\n"
125 ".previous\n"
126 : "=d" (cc), "=d" (retv)
127 : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
128 : "cc", "memory", "0", "1" );
129#else
130 __asm__ __volatile__ ( "lr\t0,%2\n"
131 "lr\t1,%3\n"
132 "\tstsi\t0(%4)\n"
133 "0:\tipm\t%0\n"
134 "\tsrl\t%0,28\n"
135 "1:lr\t%1,0\n"
136 ".section .fixup,\"ax\"\n"
137 "2:\tlhi\t%0,3\n"
138 "\tjg\t1b\n"
139 ".previous\n"
140 ".section __ex_table,\"a\"\n"
141 "\t.align 8\n"
142 "\t.quad 0b,2b\n"
143 ".previous\n"
144 : "=d" (cc), "=d" (retv)
145 : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo)
146 : "cc", "memory", "0", "1" );
147#endif
148
149 return cc? -1 : retv;
150} 111}
151 112
152static inline int stsi_0 (void) 113static inline int stsi_0(void)
153{ 114{
154 int rc = stsi (NULL, 0, 0, 0); 115 int rc = stsi (NULL, 0, 0, 0);
155 return rc == -1 ? rc : (((unsigned int)rc) >> 28); 116 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
156} 117}
157 118
158static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info) 119static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
159{ 120{
160 int rc = stsi (info, 1, 1, 1); 121 if (stsi(info, 1, 1, 1) == -ENOSYS)
161 if (rc != -1) 122 return len;
162 { 123
163 EBCASC (info->manufacturer, sizeof(info->manufacturer)); 124 EBCASC(info->manufacturer, sizeof(info->manufacturer));
164 EBCASC (info->type, sizeof(info->type)); 125 EBCASC(info->type, sizeof(info->type));
165 EBCASC (info->model, sizeof(info->model)); 126 EBCASC(info->model, sizeof(info->model));
166 EBCASC (info->sequence, sizeof(info->sequence)); 127 EBCASC(info->sequence, sizeof(info->sequence));
167 EBCASC (info->plant, sizeof(info->plant)); 128 EBCASC(info->plant, sizeof(info->plant));
168 } 129 EBCASC(info->model_capacity, sizeof(info->model_capacity));
169 return rc == -1 ? rc : 0; 130 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
131 info->manufacturer);
132 len += sprintf(page + len, "Type: %-4.4s\n",
133 info->type);
134 if (info->model[0] != '\0')
135 /*
136 * Sigh: the model field has been renamed with System z9
137 * to model_capacity and a new model field has been added
138 * after the plant field. To avoid confusing older programs
139 * the "Model:" prints "model_capacity model" or just
140 * "model_capacity" if the model string is empty .
141 */
142 len += sprintf(page + len,
143 "Model: %-16.16s %-16.16s\n",
144 info->model_capacity, info->model);
145 else
146 len += sprintf(page + len, "Model: %-16.16s\n",
147 info->model_capacity);
148 len += sprintf(page + len, "Sequence Code: %-16.16s\n",
149 info->sequence);
150 len += sprintf(page + len, "Plant: %-4.4s\n",
151 info->plant);
152 len += sprintf(page + len, "Model Capacity: %-16.16s\n",
153 info->model_capacity);
154 return len;
170} 155}
171 156
172static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info) 157#if 0 /* Currently unused */
158static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len)
173{ 159{
174 int rc = stsi (info, 1, 2, 1); 160 if (stsi(info, 1, 2, 1) == -ENOSYS)
175 if (rc != -1) 161 return len;
176 { 162
177 EBCASC (info->sequence, sizeof(info->sequence)); 163 len += sprintf(page + len, "\n");
178 EBCASC (info->plant, sizeof(info->plant)); 164 EBCASC(info->sequence, sizeof(info->sequence));
179 } 165 EBCASC(info->plant, sizeof(info->plant));
180 return rc == -1 ? rc : 0; 166 len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n",
167 info->sequence);
168 len += sprintf(page + len, "Plant of CPU: %-16.16s\n",
169 info->plant);
170 return len;
181} 171}
172#endif
182 173
183static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info) 174static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
184{ 175{
185 int rc = stsi (info, 1, 2, 2); 176 struct sysinfo_1_2_2_extension *ext;
186 return rc == -1 ? rc : 0; 177 int i;
178
179 if (stsi(info, 1, 2, 2) == -ENOSYS)
180 return len;
181 ext = (struct sysinfo_1_2_2_extension *)
182 ((unsigned long) info + info->acc_offset);
183
184 len += sprintf(page + len, "\n");
185 len += sprintf(page + len, "CPUs Total: %d\n",
186 info->cpus_total);
187 len += sprintf(page + len, "CPUs Configured: %d\n",
188 info->cpus_configured);
189 len += sprintf(page + len, "CPUs Standby: %d\n",
190 info->cpus_standby);
191 len += sprintf(page + len, "CPUs Reserved: %d\n",
192 info->cpus_reserved);
193
194 if (info->format == 1) {
195 /*
196 * Sigh 2. According to the specification the alternate
197 * capability field is a 32 bit floating point number
198 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer.
201 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability.
203 */
204 len += sprintf(page + len, "Capability: %u %u\n",
205 info->capability, ext->alt_capability);
206 for (i = 2; i <= info->cpus_total; i++)
207 len += sprintf(page + len,
208 "Adjustment %02d-way: %u %u\n",
209 i, info->adjustment[i-2],
210 ext->alt_adjustment[i-2]);
211
212 } else {
213 len += sprintf(page + len, "Capability: %u\n",
214 info->capability);
215 for (i = 2; i <= info->cpus_total; i++)
216 len += sprintf(page + len,
217 "Adjustment %02d-way: %u\n",
218 i, info->adjustment[i-2]);
219 }
220
221 if (info->secondary_capability != 0)
222 len += sprintf(page + len, "Secondary Capability: %d\n",
223 info->secondary_capability);
224
225 return len;
187} 226}
188 227
189static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info) 228#if 0 /* Currently unused */
229static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
190{ 230{
191 int rc = stsi (info, 2, 2, 1); 231 if (stsi(info, 2, 2, 1) == -ENOSYS)
192 if (rc != -1) 232 return len;
193 { 233
194 EBCASC (info->sequence, sizeof(info->sequence)); 234 len += sprintf(page + len, "\n");
195 EBCASC (info->plant, sizeof(info->plant)); 235 EBCASC (info->sequence, sizeof(info->sequence));
196 } 236 EBCASC (info->plant, sizeof(info->plant));
197 return rc == -1 ? rc : 0; 237 len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
238 info->sequence);
239 len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
240 info->plant);
241 return len;
198} 242}
243#endif
199 244
200static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info) 245static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
201{ 246{
202 int rc = stsi (info, 2, 2, 2); 247 if (stsi(info, 2, 2, 2) == -ENOSYS)
203 if (rc != -1) 248 return len;
204 { 249
205 EBCASC (info->name, sizeof(info->name)); 250 EBCASC (info->name, sizeof(info->name));
206 } 251
207 return rc == -1 ? rc : 0; 252 len += sprintf(page + len, "\n");
253 len += sprintf(page + len, "LPAR Number: %d\n",
254 info->lpar_number);
255
256 len += sprintf(page + len, "LPAR Characteristics: ");
257 if (info->characteristics & LPAR_CHAR_DEDICATED)
258 len += sprintf(page + len, "Dedicated ");
259 if (info->characteristics & LPAR_CHAR_SHARED)
260 len += sprintf(page + len, "Shared ");
261 if (info->characteristics & LPAR_CHAR_LIMITED)
262 len += sprintf(page + len, "Limited ");
263 len += sprintf(page + len, "\n");
264
265 len += sprintf(page + len, "LPAR Name: %-8.8s\n",
266 info->name);
267
268 len += sprintf(page + len, "LPAR Adjustment: %d\n",
269 info->caf);
270
271 len += sprintf(page + len, "LPAR CPUs Total: %d\n",
272 info->cpus_total);
273 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
274 info->cpus_configured);
275 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
276 info->cpus_standby);
277 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
278 info->cpus_reserved);
279 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
280 info->cpus_dedicated);
281 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
282 info->cpus_shared);
283 return len;
208} 284}
209 285
210static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info) 286static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
211{ 287{
212 int rc = stsi (info, 3, 2, 2); 288 int i;
213 if (rc != -1) 289
214 { 290 if (stsi(info, 3, 2, 2) == -ENOSYS)
215 int i; 291 return len;
216 for (i = 0; i < info->count; i++) 292 for (i = 0; i < info->count; i++) {
217 { 293 EBCASC (info->vm[i].name, sizeof(info->vm[i].name));
218 EBCASC (info->vm[i].name, sizeof(info->vm[i].name)); 294 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi));
219 EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi)); 295 len += sprintf(page + len, "\n");
220 } 296 len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
297 i, info->vm[i].name);
298 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
299 i, info->vm[i].cpi);
300
301 len += sprintf(page + len, "VM%02d Adjustment: %d\n",
302 i, info->vm[i].caf);
303
304 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
305 i, info->vm[i].cpus_total);
306 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
307 i, info->vm[i].cpus_configured);
308 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
309 i, info->vm[i].cpus_standby);
310 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
311 i, info->vm[i].cpus_reserved);
221 } 312 }
222 return rc == -1 ? rc : 0; 313 return len;
223} 314}
224 315
225 316
@@ -227,118 +318,34 @@ static int proc_read_sysinfo(char *page, char **start,
227 off_t off, int count, 318 off_t off, int count,
228 int *eof, void *data) 319 int *eof, void *data)
229{ 320{
230 unsigned long info_page = get_zeroed_page (GFP_KERNEL); 321 unsigned long info = get_zeroed_page (GFP_KERNEL);
231 union s390_sysinfo *info = (union s390_sysinfo *) info_page; 322 int level, len;
232 int len = 0;
233 int level;
234 int i;
235 323
236 if (!info) 324 if (!info)
237 return 0; 325 return 0;
238 326
239 level = stsi_0 (); 327 len = 0;
240 328 level = stsi_0();
241 if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0) 329 if (level >= 1)
242 { 330 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
243 len += sprintf (page+len, "Manufacturer: %-16.16s\n",
244 info->sysinfo_1_1_1.manufacturer);
245 len += sprintf (page+len, "Type: %-4.4s\n",
246 info->sysinfo_1_1_1.type);
247 len += sprintf (page+len, "Model: %-16.16s\n",
248 info->sysinfo_1_1_1.model);
249 len += sprintf (page+len, "Sequence Code: %-16.16s\n",
250 info->sysinfo_1_1_1.sequence);
251 len += sprintf (page+len, "Plant: %-4.4s\n",
252 info->sysinfo_1_1_1.plant);
253 }
254
255 if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0)
256 {
257 len += sprintf (page+len, "\n");
258 len += sprintf (page+len, "CPUs Total: %d\n",
259 info->sysinfo_1_2_2.cpus_total);
260 len += sprintf (page+len, "CPUs Configured: %d\n",
261 info->sysinfo_1_2_2.cpus_configured);
262 len += sprintf (page+len, "CPUs Standby: %d\n",
263 info->sysinfo_1_2_2.cpus_standby);
264 len += sprintf (page+len, "CPUs Reserved: %d\n",
265 info->sysinfo_1_2_2.cpus_reserved);
266
267 len += sprintf (page+len, "Capability: %d\n",
268 info->sysinfo_1_2_2.capability);
269 331
270 for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++) 332 if (level >= 1)
271 len += sprintf (page+len, "Adjustment %02d-way: %d\n", 333 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
272 i, info->sysinfo_1_2_2.adjustment[i-2]);
273 }
274 334
275 if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0) 335 if (level >= 2)
276 { 336 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
277 len += sprintf (page+len, "\n");
278 len += sprintf (page+len, "LPAR Number: %d\n",
279 info->sysinfo_2_2_2.lpar_number);
280
281 len += sprintf (page+len, "LPAR Characteristics: ");
282 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED)
283 len += sprintf (page+len, "Dedicated ");
284 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED)
285 len += sprintf (page+len, "Shared ");
286 if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED)
287 len += sprintf (page+len, "Limited ");
288 len += sprintf (page+len, "\n");
289
290 len += sprintf (page+len, "LPAR Name: %-8.8s\n",
291 info->sysinfo_2_2_2.name);
292
293 len += sprintf (page+len, "LPAR Adjustment: %d\n",
294 info->sysinfo_2_2_2.caf);
295
296 len += sprintf (page+len, "LPAR CPUs Total: %d\n",
297 info->sysinfo_2_2_2.cpus_total);
298 len += sprintf (page+len, "LPAR CPUs Configured: %d\n",
299 info->sysinfo_2_2_2.cpus_configured);
300 len += sprintf (page+len, "LPAR CPUs Standby: %d\n",
301 info->sysinfo_2_2_2.cpus_standby);
302 len += sprintf (page+len, "LPAR CPUs Reserved: %d\n",
303 info->sysinfo_2_2_2.cpus_reserved);
304 len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n",
305 info->sysinfo_2_2_2.cpus_dedicated);
306 len += sprintf (page+len, "LPAR CPUs Shared: %d\n",
307 info->sysinfo_2_2_2.cpus_shared);
308 }
309 337
310 if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0) 338 if (level >= 3)
311 { 339 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
312 for (i = 0; i < info->sysinfo_3_2_2.count; i++)
313 {
314 len += sprintf (page+len, "\n");
315 len += sprintf (page+len, "VM%02d Name: %-8.8s\n",
316 i, info->sysinfo_3_2_2.vm[i].name);
317 len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n",
318 i, info->sysinfo_3_2_2.vm[i].cpi);
319
320 len += sprintf (page+len, "VM%02d Adjustment: %d\n",
321 i, info->sysinfo_3_2_2.vm[i].caf);
322
323 len += sprintf (page+len, "VM%02d CPUs Total: %d\n",
324 i, info->sysinfo_3_2_2.vm[i].cpus_total);
325 len += sprintf (page+len, "VM%02d CPUs Configured: %d\n",
326 i, info->sysinfo_3_2_2.vm[i].cpus_configured);
327 len += sprintf (page+len, "VM%02d CPUs Standby: %d\n",
328 i, info->sysinfo_3_2_2.vm[i].cpus_standby);
329 len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n",
330 i, info->sysinfo_3_2_2.vm[i].cpus_reserved);
331 }
332 }
333 340
334 free_page (info_page); 341 free_page (info);
335 return len; 342 return len;
336} 343}
337 344
338static __init int create_proc_sysinfo(void) 345static __init int create_proc_sysinfo(void)
339{ 346{
340 create_proc_read_entry ("sysinfo", 0444, NULL, 347 create_proc_read_entry("sysinfo", 0444, NULL,
341 proc_read_sysinfo, NULL); 348 proc_read_sysinfo, NULL);
342 return 0; 349 return 0;
343} 350}
344 351
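
The sysinfo.c rework above moves all of the /proc/sysinfo formatting into the per-level stsi_x_y_z() helpers: each helper now takes the proc page buffer plus the current length, appends its own lines with sprintf(), and returns the updated length, so proc_read_sysinfo() reduces to chaining the helpers per STSI level. A minimal sketch of that append-and-return-length shape (stsi_demo() and the field it prints are illustrative, not part of the patch):

/* Illustrative only: same shape as the stsi_*() helpers above. */
static int stsi_demo(struct sysinfo_1_1_1 *info, char *page, int len)
{
        if (stsi(info, 1, 1, 1) == -ENOSYS)
                return len;     /* level not available: page is left untouched */
        EBCASC(info->manufacturer, sizeof(info->manufacturer));
        len += sprintf(page + len, "Manufacturer:         %-16.16s\n",
                       info->manufacturer);
        return len;             /* caller keeps chaining with the new length */
}
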
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 16a12a3b7b2b..4ea49fd7965e 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -662,7 +662,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
662 particular standard ISA I/O Address need not be probed. 662 particular standard ISA I/O Address need not be probed.
663 */ 663 */
664 PrimaryProbeInfo->IO_Address = 0; 664 PrimaryProbeInfo->IO_Address = 0;
665 while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) { 665 while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) {
666 struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter; 666 struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter;
667 struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation; 667 struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation;
668 enum BusLogic_ISACompatibleIOPort ModifyIOAddressRequest; 668 enum BusLogic_ISACompatibleIOPort ModifyIOAddressRequest;
@@ -762,7 +762,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
762 PrimaryProbeInfo->Bus = Bus; 762 PrimaryProbeInfo->Bus = Bus;
763 PrimaryProbeInfo->Device = Device; 763 PrimaryProbeInfo->Device = Device;
764 PrimaryProbeInfo->IRQ_Channel = IRQ_Channel; 764 PrimaryProbeInfo->IRQ_Channel = IRQ_Channel;
765 PrimaryProbeInfo->PCI_Device = PCI_Device; 765 PrimaryProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
766 PCIMultiMasterCount++; 766 PCIMultiMasterCount++;
767 } else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) { 767 } else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) {
768 struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++]; 768 struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
@@ -773,7 +773,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
773 ProbeInfo->Bus = Bus; 773 ProbeInfo->Bus = Bus;
774 ProbeInfo->Device = Device; 774 ProbeInfo->Device = Device;
775 ProbeInfo->IRQ_Channel = IRQ_Channel; 775 ProbeInfo->IRQ_Channel = IRQ_Channel;
776 ProbeInfo->PCI_Device = PCI_Device; 776 ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
777 NonPrimaryPCIMultiMasterCount++; 777 NonPrimaryPCIMultiMasterCount++;
778 PCIMultiMasterCount++; 778 PCIMultiMasterCount++;
779 } else 779 } else
@@ -823,7 +823,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
823 noting the PCI bus location and assigned IRQ Channel. 823 noting the PCI bus location and assigned IRQ Channel.
824 */ 824 */
825 PCI_Device = NULL; 825 PCI_Device = NULL;
826 while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) { 826 while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) {
827 unsigned char Bus; 827 unsigned char Bus;
828 unsigned char Device; 828 unsigned char Device;
829 unsigned int IRQ_Channel; 829 unsigned int IRQ_Channel;
@@ -850,7 +850,7 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd
850 ProbeInfo->Bus = Bus; 850 ProbeInfo->Bus = Bus;
851 ProbeInfo->Device = Device; 851 ProbeInfo->Device = Device;
852 ProbeInfo->IRQ_Channel = IRQ_Channel; 852 ProbeInfo->IRQ_Channel = IRQ_Channel;
853 ProbeInfo->PCI_Device = PCI_Device; 853 ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
854 break; 854 break;
855 } 855 }
856 } 856 }
@@ -874,7 +874,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
874 /* 874 /*
875 Interrogate PCI Configuration Space for any FlashPoint Host Adapters. 875 Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
876 */ 876 */
877 while ((PCI_Device = pci_find_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) { 877 while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) {
878 unsigned char Bus; 878 unsigned char Bus;
879 unsigned char Device; 879 unsigned char Device;
880 unsigned int IRQ_Channel; 880 unsigned int IRQ_Channel;
@@ -923,7 +923,7 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
923 ProbeInfo->Bus = Bus; 923 ProbeInfo->Bus = Bus;
924 ProbeInfo->Device = Device; 924 ProbeInfo->Device = Device;
925 ProbeInfo->IRQ_Channel = IRQ_Channel; 925 ProbeInfo->IRQ_Channel = IRQ_Channel;
926 ProbeInfo->PCI_Device = PCI_Device; 926 ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
927 FlashPointCount++; 927 FlashPointCount++;
928 } else 928 } else
929 BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL); 929 BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL);
@@ -1890,6 +1890,7 @@ static void BusLogic_ReleaseResources(struct BusLogic_HostAdapter *HostAdapter)
1890 */ 1890 */
1891 if (HostAdapter->MailboxSpace) 1891 if (HostAdapter->MailboxSpace)
1892 pci_free_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, HostAdapter->MailboxSpace, HostAdapter->MailboxSpaceHandle); 1892 pci_free_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, HostAdapter->MailboxSpace, HostAdapter->MailboxSpaceHandle);
1893 pci_dev_put(HostAdapter->PCI_Device);
1893 HostAdapter->MailboxSpace = NULL; 1894 HostAdapter->MailboxSpace = NULL;
1894 HostAdapter->MailboxSpaceHandle = 0; 1895 HostAdapter->MailboxSpaceHandle = 0;
1895 HostAdapter->MailboxSize = 0; 1896 HostAdapter->MailboxSize = 0;
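
The pci_find_device() to pci_get_device() conversion above is a reference-counting change: pci_get_device() returns each device with its refcount raised and drops the reference on the previously returned device as the walk advances, so every pointer BusLogic keeps in its probe table takes an explicit pci_dev_get(), and BusLogic_ReleaseResources() balances it with the pci_dev_put() added here. A minimal sketch of that pattern (probe_table, count and i are illustrative names, not the driver's):

struct pci_dev *pdev = NULL;

while ((pdev = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
                              PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, pdev)) != NULL) {
        /* Keep a long-lived copy: take our own reference for the probe table.
         * The loop's reference travels with pdev and is dropped by the next
         * pci_get_device() call (or when the walk ends with NULL). */
        probe_table[count++].pci_dev = pci_dev_get(pdev);
}

/* ... later, when the adapter is torn down ... */
pci_dev_put(probe_table[i].pci_dev);
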
@@ -2176,6 +2177,7 @@ static int __init BusLogic_init(void)
2176{ 2177{
2177 int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex; 2178 int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex;
2178 struct BusLogic_HostAdapter *PrototypeHostAdapter; 2179 struct BusLogic_HostAdapter *PrototypeHostAdapter;
2180 int ret = 0;
2179 2181
2180#ifdef MODULE 2182#ifdef MODULE
2181 if (BusLogic) 2183 if (BusLogic)
@@ -2282,25 +2284,49 @@ static int __init BusLogic_init(void)
2282 perform Target Device Inquiry. 2284 perform Target Device Inquiry.
2283 */ 2285 */
2284 if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) && 2286 if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) &&
2285 BusLogic_ReportHostAdapterConfiguration(HostAdapter) && BusLogic_AcquireResources(HostAdapter) && BusLogic_CreateInitialCCBs(HostAdapter) && BusLogic_InitializeHostAdapter(HostAdapter) && BusLogic_TargetDeviceInquiry(HostAdapter)) { 2287 BusLogic_ReportHostAdapterConfiguration(HostAdapter) &&
2288 BusLogic_AcquireResources(HostAdapter) &&
2289 BusLogic_CreateInitialCCBs(HostAdapter) &&
2290 BusLogic_InitializeHostAdapter(HostAdapter) &&
2291 BusLogic_TargetDeviceInquiry(HostAdapter)) {
2286 /* 2292 /*
2287 Initialization has been completed successfully. Release and 2293 Initialization has been completed successfully. Release and
2288 re-register usage of the I/O Address range so that the Model 2294 re-register usage of the I/O Address range so that the Model
2289 Name of the Host Adapter will appear, and initialize the SCSI 2295 Name of the Host Adapter will appear, and initialize the SCSI
2290 Host structure. 2296 Host structure.
2291 */ 2297 */
2292 release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); 2298 release_region(HostAdapter->IO_Address,
2293 if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, HostAdapter->FullModelName)) { 2299 HostAdapter->AddressCount);
2294 printk(KERN_WARNING "BusLogic: Release and re-register of " "port 0x%04lx failed \n", (unsigned long) HostAdapter->IO_Address); 2300 if (!request_region(HostAdapter->IO_Address,
2301 HostAdapter->AddressCount,
2302 HostAdapter->FullModelName)) {
2303 printk(KERN_WARNING
2304 "BusLogic: Release and re-register of "
2305 "port 0x%04lx failed \n",
2306 (unsigned long)HostAdapter->IO_Address);
2295 BusLogic_DestroyCCBs(HostAdapter); 2307 BusLogic_DestroyCCBs(HostAdapter);
2296 BusLogic_ReleaseResources(HostAdapter); 2308 BusLogic_ReleaseResources(HostAdapter);
2297 list_del(&HostAdapter->host_list); 2309 list_del(&HostAdapter->host_list);
2298 scsi_host_put(Host); 2310 scsi_host_put(Host);
2311 ret = -ENOMEM;
2299 } else { 2312 } else {
2300 BusLogic_InitializeHostStructure(HostAdapter, Host); 2313 BusLogic_InitializeHostStructure(HostAdapter,
2301 scsi_add_host(Host, HostAdapter->PCI_Device ? &HostAdapter->PCI_Device->dev : NULL); 2314 Host);
2302 scsi_scan_host(Host); 2315 if (scsi_add_host(Host, HostAdapter->PCI_Device
2303 BusLogicHostAdapterCount++; 2316 ? &HostAdapter->PCI_Device->dev
2317 : NULL)) {
2318 printk(KERN_WARNING
2319 "BusLogic: scsi_add_host()"
2320 "failed!\n");
2321 BusLogic_DestroyCCBs(HostAdapter);
2322 BusLogic_ReleaseResources(HostAdapter);
2323 list_del(&HostAdapter->host_list);
2324 scsi_host_put(Host);
2325 ret = -ENODEV;
2326 } else {
2327 scsi_scan_host(Host);
2328 BusLogicHostAdapterCount++;
2329 }
2304 } 2330 }
2305 } else { 2331 } else {
2306 /* 2332 /*
@@ -2315,12 +2341,13 @@ static int __init BusLogic_init(void)
2315 BusLogic_ReleaseResources(HostAdapter); 2341 BusLogic_ReleaseResources(HostAdapter);
2316 list_del(&HostAdapter->host_list); 2342 list_del(&HostAdapter->host_list);
2317 scsi_host_put(Host); 2343 scsi_host_put(Host);
2344 ret = -ENODEV;
2318 } 2345 }
2319 } 2346 }
2320 kfree(PrototypeHostAdapter); 2347 kfree(PrototypeHostAdapter);
2321 kfree(BusLogic_ProbeInfoList); 2348 kfree(BusLogic_ProbeInfoList);
2322 BusLogic_ProbeInfoList = NULL; 2349 BusLogic_ProbeInfoList = NULL;
2323 return 0; 2350 return ret;
2324} 2351}
2325 2352
2326 2353
@@ -2954,6 +2981,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2954} 2981}
2955 2982
2956 2983
2984#if 0
2957/* 2985/*
2958 BusLogic_AbortCommand aborts Command if possible. 2986 BusLogic_AbortCommand aborts Command if possible.
2959*/ 2987*/
@@ -3024,6 +3052,7 @@ static int BusLogic_AbortCommand(struct scsi_cmnd *Command)
3024 return SUCCESS; 3052 return SUCCESS;
3025} 3053}
3026 3054
3055#endif
3027/* 3056/*
3028 BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all 3057 BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all
3029 currently executing SCSI Commands as having been Reset. 3058 currently executing SCSI Commands as having been Reset.
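
Besides the device-reference changes, BusLogic_init() above now checks the result of scsi_add_host() and propagates a negative errno instead of unconditionally returning 0, so a failed probe is reported to module_init() rather than silently swallowed. A stripped-down sketch of that error path (example_register_host() and the chosen errno are illustrative):

/* Sketch only: propagating a scsi_add_host() failure instead of returning 0. */
static int example_register_host(struct Scsi_Host *shost, struct device *parent)
{
        if (scsi_add_host(shost, parent)) {
                scsi_host_put(shost);   /* undo the partial setup */
                return -ENODEV;         /* module_init() now sees the failure */
        }
        scsi_scan_host(shost);
        return 0;
}
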
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 2df4d15c9634..c4dfcc91ddda 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -27,6 +27,11 @@ config SCSI
27 However, do not compile this as a module if your root file system 27 However, do not compile this as a module if your root file system
28 (the one containing the directory /) is located on a SCSI device. 28 (the one containing the directory /) is located on a SCSI device.
29 29
30config SCSI_NETLINK
31 bool
32 default n
33 select NET
34
30config SCSI_PROC_FS 35config SCSI_PROC_FS
31 bool "legacy /proc/scsi/ support" 36 bool "legacy /proc/scsi/ support"
32 depends on SCSI && PROC_FS 37 depends on SCSI && PROC_FS
@@ -209,7 +214,7 @@ config SCSI_LOGGING
209 there should be no noticeable performance impact as long as you have 214 there should be no noticeable performance impact as long as you have
210 logging turned off. 215 logging turned off.
211 216
212menu "SCSI Transport Attributes" 217menu "SCSI Transports"
213 depends on SCSI 218 depends on SCSI
214 219
215config SCSI_SPI_ATTRS 220config SCSI_SPI_ATTRS
@@ -222,6 +227,7 @@ config SCSI_SPI_ATTRS
222config SCSI_FC_ATTRS 227config SCSI_FC_ATTRS
223 tristate "FiberChannel Transport Attributes" 228 tristate "FiberChannel Transport Attributes"
224 depends on SCSI 229 depends on SCSI
230 select SCSI_NETLINK
225 help 231 help
226 If you wish to export transport-specific information about 232 If you wish to export transport-specific information about
227 each attached FiberChannel device to sysfs, say Y. 233 each attached FiberChannel device to sysfs, say Y.
@@ -242,6 +248,8 @@ config SCSI_SAS_ATTRS
242 If you wish to export transport-specific information about 248 If you wish to export transport-specific information about
243 each attached SAS device to sysfs, say Y. 249 each attached SAS device to sysfs, say Y.
244 250
251source "drivers/scsi/libsas/Kconfig"
252
245endmenu 253endmenu
246 254
247menu "SCSI low-level drivers" 255menu "SCSI low-level drivers"
@@ -431,6 +439,7 @@ config SCSI_AIC7XXX_OLD
431 module will be called aic7xxx_old. 439 module will be called aic7xxx_old.
432 440
433source "drivers/scsi/aic7xxx/Kconfig.aic79xx" 441source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
442source "drivers/scsi/aic94xx/Kconfig"
434 443
435# All the I2O code and drivers do not seem to be 64bit safe. 444# All the I2O code and drivers do not seem to be 64bit safe.
436config SCSI_DPT_I2O 445config SCSI_DPT_I2O
@@ -469,6 +478,20 @@ config SCSI_IN2000
469 To compile this driver as a module, choose M here: the 478 To compile this driver as a module, choose M here: the
470 module will be called in2000. 479 module will be called in2000.
471 480
481config SCSI_ARCMSR
482 tristate "ARECA ARC11X0[PCI-X]/ARC12X0[PCI-EXPRESS] SATA-RAID support"
483 depends on PCI && SCSI
484 help
485 This driver supports all of ARECA's SATA RAID controller cards.
486 This is an ARECA-maintained driver by Erich Chen.
487 If you have any problems, please mail to: < erich@areca.com.tw >
488 Areca supports Linux RAID config tools.
489
490 < http://www.areca.com.tw >
491
492 To compile this driver as a module, choose M here: the
493 module will be called arcmsr (modprobe arcmsr).
494
472source "drivers/scsi/megaraid/Kconfig.megaraid" 495source "drivers/scsi/megaraid/Kconfig.megaraid"
473 496
474config SCSI_HPTIOP 497config SCSI_HPTIOP
@@ -915,6 +938,13 @@ config 53C700_LE_ON_BE
915 depends on SCSI_LASI700 938 depends on SCSI_LASI700
916 default y 939 default y
917 940
941config SCSI_STEX
942 tristate "Promise SuperTrak EX Series support"
943 depends on PCI && SCSI
944 ---help---
945 This driver supports Promise SuperTrak EX8350/8300/16350/16300
946 Storage controllers.
947
918config SCSI_SYM53C8XX_2 948config SCSI_SYM53C8XX_2
919 tristate "SYM53C8XX Version 2 SCSI support" 949 tristate "SYM53C8XX Version 2 SCSI support"
920 depends on PCI && SCSI 950 depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b678f957cfe2..1ef951be7a5d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o
32obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o 32obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o
33obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o 33obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o 34obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o
35obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
35 36
36obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o 37obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
37obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 38obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
59obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o 60obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
60obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o 61obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
61obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o 62obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
63obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
62obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o 64obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o
63obj-$(CONFIG_SCSI_AHA152X) += aha152x.o 65obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
64obj-$(CONFIG_SCSI_AHA1542) += aha1542.o 66obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
@@ -67,6 +69,7 @@ obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/
67obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ 69obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/
68obj-$(CONFIG_SCSI_AACRAID) += aacraid/ 70obj-$(CONFIG_SCSI_AACRAID) += aacraid/
69obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o 71obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
72obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
70obj-$(CONFIG_SCSI_IPS) += ips.o 73obj-$(CONFIG_SCSI_IPS) += ips.o
71obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o 74obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
72obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o 75obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
@@ -123,6 +126,7 @@ obj-$(CONFIG_SCSI_NSP32) += nsp32.o
123obj-$(CONFIG_SCSI_IPR) += ipr.o 126obj-$(CONFIG_SCSI_IPR) += ipr.o
124obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/ 127obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
125obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 128obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
129obj-$(CONFIG_SCSI_STEX) += stex.o
126 130
127obj-$(CONFIG_ARM) += arm/ 131obj-$(CONFIG_ARM) += arm/
128 132
@@ -140,6 +144,7 @@ scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
140 scsicam.o scsi_error.o scsi_lib.o \ 144 scsicam.o scsi_error.o scsi_lib.o \
141 scsi_scan.o scsi_sysfs.o \ 145 scsi_scan.o scsi_sysfs.o \
142 scsi_devinfo.o 146 scsi_devinfo.o
147scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
143scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 148scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
144scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 149scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
145 150
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index fddfa2ebcd70..085406928605 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -40,7 +40,7 @@ static irqreturn_t a2091_intr (int irq, void *_instance, struct pt_regs *fp)
40 return IRQ_HANDLED; 40 return IRQ_HANDLED;
41} 41}
42 42
43static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 43static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
44{ 44{
45 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 45 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
46 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 46 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -115,7 +115,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
115 return 0; 115 return 0;
116} 116}
117 117
118static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 118static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
119 int status) 119 int status)
120{ 120{
121 /* disable SCSI interrupts */ 121 /* disable SCSI interrupts */
@@ -217,7 +217,7 @@ int __init a2091_detect(struct scsi_host_template *tpnt)
217 return num_a2091; 217 return num_a2091;
218} 218}
219 219
220static int a2091_bus_reset(Scsi_Cmnd *cmd) 220static int a2091_bus_reset(struct scsi_cmnd *cmd)
221{ 221{
222 /* FIXME perform bus-specific reset */ 222 /* FIXME perform bus-specific reset */
223 223
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 22d6a13dd8be..fe809bc88d73 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -13,10 +13,6 @@
13 13
14int a2091_detect(struct scsi_host_template *); 14int a2091_detect(struct scsi_host_template *);
15int a2091_release(struct Scsi_Host *); 15int a2091_release(struct Scsi_Host *);
16const char *wd33c93_info(void);
17int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
18int wd33c93_abort(Scsi_Cmnd *);
19int wd33c93_reset(Scsi_Cmnd *, unsigned int);
20 16
21#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
22#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index ae9ab4b136ac..7bf46d40b561 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -44,7 +44,7 @@ static irqreturn_t a3000_intr (int irq, void *dummy, struct pt_regs *fp)
44 return IRQ_NONE; 44 return IRQ_NONE;
45} 45}
46 46
47static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 47static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
48{ 48{
49 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 49 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
50 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 50 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -110,8 +110,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
110 return 0; 110 return 0;
111} 111}
112 112
113static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 113static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
114 int status) 114 int status)
115{ 115{
116 /* disable SCSI interrupts */ 116 /* disable SCSI interrupts */
117 unsigned short cntr = CNTR_PDMD; 117 unsigned short cntr = CNTR_PDMD;
@@ -205,7 +205,7 @@ fail_register:
205 return 0; 205 return 0;
206} 206}
207 207
208static int a3000_bus_reset(Scsi_Cmnd *cmd) 208static int a3000_bus_reset(struct scsi_cmnd *cmd)
209{ 209{
210 /* FIXME perform bus-specific reset */ 210 /* FIXME perform bus-specific reset */
211 211
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 5535a65150a4..44a4ec7b4650 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -13,10 +13,6 @@
13 13
14int a3000_detect(struct scsi_host_template *); 14int a3000_detect(struct scsi_host_template *);
15int a3000_release(struct Scsi_Host *); 15int a3000_release(struct Scsi_Host *);
16const char *wd33c93_info(void);
17int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
18int wd33c93_abort(Scsi_Cmnd *);
19int wd33c93_reset(Scsi_Cmnd *, unsigned int);
20 16
21#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
22#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 83b5c7d085f2..ac108f9e2674 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -169,13 +169,17 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control block
169int acbsize = -1; 169int acbsize = -1;
170module_param(acbsize, int, S_IRUGO|S_IWUSR); 170module_param(acbsize, int, S_IRUGO|S_IWUSR);
171MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); 171MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
172
173int expose_physicals = 0;
174module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
175MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. 0=off, 1=on");
172/** 176/**
173 * aac_get_config_status - check the adapter configuration 177 * aac_get_config_status - check the adapter configuration
174 * @common: adapter to query 178 * @common: adapter to query
175 * 179 *
176 * Query config status, and commit the configuration if needed. 180 * Query config status, and commit the configuration if needed.
177 */ 181 */
178int aac_get_config_status(struct aac_dev *dev) 182int aac_get_config_status(struct aac_dev *dev, int commit_flag)
179{ 183{
180 int status = 0; 184 int status = 0;
181 struct fib * fibptr; 185 struct fib * fibptr;
@@ -219,7 +223,7 @@ int aac_get_config_status(struct aac_dev *dev)
219 aac_fib_complete(fibptr); 223 aac_fib_complete(fibptr);
220 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */ 224 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
221 if (status >= 0) { 225 if (status >= 0) {
222 if (commit == 1) { 226 if ((commit == 1) || commit_flag) {
223 struct aac_commit_config * dinfo; 227 struct aac_commit_config * dinfo;
224 aac_fib_init(fibptr); 228 aac_fib_init(fibptr);
225 dinfo = (struct aac_commit_config *) fib_data(fibptr); 229 dinfo = (struct aac_commit_config *) fib_data(fibptr);
@@ -489,6 +493,8 @@ int aac_probe_container(struct aac_dev *dev, int cid)
489 unsigned instance; 493 unsigned instance;
490 494
491 fsa_dev_ptr = dev->fsa_dev; 495 fsa_dev_ptr = dev->fsa_dev;
496 if (!fsa_dev_ptr)
497 return -ENOMEM;
492 instance = dev->scsi_host_ptr->unique_id; 498 instance = dev->scsi_host_ptr->unique_id;
493 499
494 if (!(fibptr = aac_fib_alloc(dev))) 500 if (!(fibptr = aac_fib_alloc(dev)))
@@ -782,8 +788,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
782 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); 788 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
783 } 789 }
784 790
785 tmp = le32_to_cpu(dev->adapter_info.kernelrev); 791 if (!dev->in_reset) {
786 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", 792 tmp = le32_to_cpu(dev->adapter_info.kernelrev);
793 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
787 dev->name, 794 dev->name,
788 dev->id, 795 dev->id,
789 tmp>>24, 796 tmp>>24,
@@ -792,20 +799,21 @@ int aac_get_adapter_info(struct aac_dev* dev)
792 le32_to_cpu(dev->adapter_info.kernelbuild), 799 le32_to_cpu(dev->adapter_info.kernelbuild),
793 (int)sizeof(dev->supplement_adapter_info.BuildDate), 800 (int)sizeof(dev->supplement_adapter_info.BuildDate),
794 dev->supplement_adapter_info.BuildDate); 801 dev->supplement_adapter_info.BuildDate);
795 tmp = le32_to_cpu(dev->adapter_info.monitorrev); 802 tmp = le32_to_cpu(dev->adapter_info.monitorrev);
796 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", 803 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
797 dev->name, dev->id, 804 dev->name, dev->id,
798 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 805 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
799 le32_to_cpu(dev->adapter_info.monitorbuild)); 806 le32_to_cpu(dev->adapter_info.monitorbuild));
800 tmp = le32_to_cpu(dev->adapter_info.biosrev); 807 tmp = le32_to_cpu(dev->adapter_info.biosrev);
801 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", 808 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
802 dev->name, dev->id, 809 dev->name, dev->id,
803 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 810 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
804 le32_to_cpu(dev->adapter_info.biosbuild)); 811 le32_to_cpu(dev->adapter_info.biosbuild));
805 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) 812 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
806 printk(KERN_INFO "%s%d: serial %x\n", 813 printk(KERN_INFO "%s%d: serial %x\n",
807 dev->name, dev->id, 814 dev->name, dev->id,
808 le32_to_cpu(dev->adapter_info.serial[0])); 815 le32_to_cpu(dev->adapter_info.serial[0]));
816 }
809 817
810 dev->nondasd_support = 0; 818 dev->nondasd_support = 0;
811 dev->raid_scsi_mode = 0; 819 dev->raid_scsi_mode = 0;
@@ -1392,6 +1400,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1392 struct scsi_cmnd *cmd; 1400 struct scsi_cmnd *cmd;
1393 struct scsi_device *sdev = scsicmd->device; 1401 struct scsi_device *sdev = scsicmd->device;
1394 int active = 0; 1402 int active = 0;
1403 struct aac_dev *aac;
1395 unsigned long flags; 1404 unsigned long flags;
1396 1405
1397 /* 1406 /*
@@ -1413,11 +1422,14 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1413 if (active) 1422 if (active)
1414 return SCSI_MLQUEUE_DEVICE_BUSY; 1423 return SCSI_MLQUEUE_DEVICE_BUSY;
1415 1424
1425 aac = (struct aac_dev *)scsicmd->device->host->hostdata;
1426 if (aac->in_reset)
1427 return SCSI_MLQUEUE_HOST_BUSY;
1428
1416 /* 1429 /*
1417 * Allocate and initialize a Fib 1430 * Allocate and initialize a Fib
1418 */ 1431 */
1419 if (!(cmd_fibcontext = 1432 if (!(cmd_fibcontext = aac_fib_alloc(aac)))
1420 aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
1421 return SCSI_MLQUEUE_HOST_BUSY; 1433 return SCSI_MLQUEUE_HOST_BUSY;
1422 1434
1423 aac_fib_init(cmd_fibcontext); 1435 aac_fib_init(cmd_fibcontext);
@@ -1470,6 +1482,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1470 struct aac_dev *dev = (struct aac_dev *)host->hostdata; 1482 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
1471 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; 1483 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
1472 1484
1485 if (fsa_dev_ptr == NULL)
1486 return -1;
1473 /* 1487 /*
1474 * If the bus, id or lun is out of range, return fail 1488 * If the bus, id or lun is out of range, return fail
1475 * Test does not apply to ID 16, the pseudo id for the controller 1489 * Test does not apply to ID 16, the pseudo id for the controller
@@ -1499,6 +1513,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1499 case INQUIRY: 1513 case INQUIRY:
1500 case READ_CAPACITY: 1514 case READ_CAPACITY:
1501 case TEST_UNIT_READY: 1515 case TEST_UNIT_READY:
1516 if (dev->in_reset)
1517 return -1;
1502 spin_unlock_irq(host->host_lock); 1518 spin_unlock_irq(host->host_lock);
1503 aac_probe_container(dev, cid); 1519 aac_probe_container(dev, cid);
1504 if ((fsa_dev_ptr[cid].valid & 1) == 0) 1520 if ((fsa_dev_ptr[cid].valid & 1) == 0)
@@ -1523,7 +1539,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1523 return 0; 1539 return 0;
1524 } 1540 }
1525 } else { /* check for physical non-dasd devices */ 1541 } else { /* check for physical non-dasd devices */
1526 if(dev->nondasd_support == 1){ 1542 if ((dev->nondasd_support == 1) || expose_physicals) {
1543 if (dev->in_reset)
1544 return -1;
1527 return aac_send_srb_fib(scsicmd); 1545 return aac_send_srb_fib(scsicmd);
1528 } else { 1546 } else {
1529 scsicmd->result = DID_NO_CONNECT << 16; 1547 scsicmd->result = DID_NO_CONNECT << 16;
@@ -1579,6 +1597,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1579 scsicmd->scsi_done(scsicmd); 1597 scsicmd->scsi_done(scsicmd);
1580 return 0; 1598 return 0;
1581 } 1599 }
1600 if (dev->in_reset)
1601 return -1;
1582 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); 1602 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
1583 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 1603 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
1584 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1604 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
@@ -1734,6 +1754,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1734 case READ_10: 1754 case READ_10:
1735 case READ_12: 1755 case READ_12:
1736 case READ_16: 1756 case READ_16:
1757 if (dev->in_reset)
1758 return -1;
1737 /* 1759 /*
1738 * Hack to keep track of ordinal number of the device that 1760 * Hack to keep track of ordinal number of the device that
1739 * corresponds to a container. Needed to convert 1761 * corresponds to a container. Needed to convert
@@ -1752,6 +1774,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1752 case WRITE_10: 1774 case WRITE_10:
1753 case WRITE_12: 1775 case WRITE_12:
1754 case WRITE_16: 1776 case WRITE_16:
1777 if (dev->in_reset)
1778 return -1;
1755 return aac_write(scsicmd, cid); 1779 return aac_write(scsicmd, cid);
1756 1780
1757 case SYNCHRONIZE_CACHE: 1781 case SYNCHRONIZE_CACHE:
@@ -1782,6 +1806,8 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
1782 struct fsa_dev_info *fsa_dev_ptr; 1806 struct fsa_dev_info *fsa_dev_ptr;
1783 1807
1784 fsa_dev_ptr = dev->fsa_dev; 1808 fsa_dev_ptr = dev->fsa_dev;
1809 if (!fsa_dev_ptr)
1810 return -EBUSY;
1785 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 1811 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1786 return -EFAULT; 1812 return -EFAULT;
1787 if (qd.cnum == -1) 1813 if (qd.cnum == -1)
@@ -1820,6 +1846,8 @@ static int force_delete_disk(struct aac_dev *dev, void __user *arg)
1820 struct fsa_dev_info *fsa_dev_ptr; 1846 struct fsa_dev_info *fsa_dev_ptr;
1821 1847
1822 fsa_dev_ptr = dev->fsa_dev; 1848 fsa_dev_ptr = dev->fsa_dev;
1849 if (!fsa_dev_ptr)
1850 return -EBUSY;
1823 1851
1824 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) 1852 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1825 return -EFAULT; 1853 return -EFAULT;
@@ -1843,6 +1871,8 @@ static int delete_disk(struct aac_dev *dev, void __user *arg)
1843 struct fsa_dev_info *fsa_dev_ptr; 1871 struct fsa_dev_info *fsa_dev_ptr;
1844 1872
1845 fsa_dev_ptr = dev->fsa_dev; 1873 fsa_dev_ptr = dev->fsa_dev;
1874 if (!fsa_dev_ptr)
1875 return -EBUSY;
1846 1876
1847 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) 1877 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1848 return -EFAULT; 1878 return -EFAULT;
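
A recurring pattern in the aachba.c hunks above: every command-path entry point now bails out early when dev->in_reset is set or when the fsa_dev container table is gone, so no new FIBs are queued while _aac_reset_adapter() (added further down in commsup.c) rebuilds the controller state. A minimal sketch of the guard (aac_example_cmd() is illustrative; the return values mirror the hunks above):

static int aac_example_cmd(struct scsi_cmnd *scsicmd)
{
        struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;

        if (!dev->fsa_dev)              /* container table already torn down */
                return -1;
        if (dev->in_reset)              /* adapter is being re-initialised */
                return -1;
        /* ... safe to build and send the FIB as before ... */
        return 0;
}
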
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d0eecd4bec83..eb3ed91bac79 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -494,6 +494,7 @@ struct adapter_ops
494 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); 494 int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
495 int (*adapter_check_health)(struct aac_dev *dev); 495 int (*adapter_check_health)(struct aac_dev *dev);
496 int (*adapter_send)(struct fib * fib); 496 int (*adapter_send)(struct fib * fib);
497 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
497}; 498};
498 499
499/* 500/*
@@ -682,14 +683,6 @@ struct rx_inbound {
682 __le32 Mailbox[8]; 683 __le32 Mailbox[8];
683}; 684};
684 685
685#define InboundMailbox0 IndexRegs.Mailbox[0]
686#define InboundMailbox1 IndexRegs.Mailbox[1]
687#define InboundMailbox2 IndexRegs.Mailbox[2]
688#define InboundMailbox3 IndexRegs.Mailbox[3]
689#define InboundMailbox4 IndexRegs.Mailbox[4]
690#define InboundMailbox5 IndexRegs.Mailbox[5]
691#define InboundMailbox6 IndexRegs.Mailbox[6]
692
693#define INBOUNDDOORBELL_0 0x00000001 686#define INBOUNDDOORBELL_0 0x00000001
694#define INBOUNDDOORBELL_1 0x00000002 687#define INBOUNDDOORBELL_1 0x00000002
695#define INBOUNDDOORBELL_2 0x00000004 688#define INBOUNDDOORBELL_2 0x00000004
@@ -1010,6 +1003,8 @@ struct aac_dev
1010 struct rx_registers __iomem *rx; 1003 struct rx_registers __iomem *rx;
1011 struct rkt_registers __iomem *rkt; 1004 struct rkt_registers __iomem *rkt;
1012 } regs; 1005 } regs;
1006 volatile void __iomem *base;
1007 volatile struct rx_inbound __iomem *IndexRegs;
1013 u32 OIMR; /* Mask Register Cache */ 1008 u32 OIMR; /* Mask Register Cache */
1014 /* 1009 /*
1015 * AIF thread states 1010 * AIF thread states
@@ -1029,6 +1024,7 @@ struct aac_dev
1029 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1024 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
1030 u8 raw_io_64; 1025 u8 raw_io_64;
1031 u8 printf_enabled; 1026 u8 printf_enabled;
1027 u8 in_reset;
1032}; 1028};
1033 1029
1034#define aac_adapter_interrupt(dev) \ 1030#define aac_adapter_interrupt(dev) \
@@ -1049,6 +1045,9 @@ struct aac_dev
1049#define aac_adapter_send(fib) \ 1045#define aac_adapter_send(fib) \
1050 ((fib)->dev)->a_ops.adapter_send(fib) 1046 ((fib)->dev)->a_ops.adapter_send(fib)
1051 1047
1048#define aac_adapter_ioremap(dev, size) \
1049 (dev)->a_ops.adapter_ioremap(dev, size)
1050
1052#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001) 1051#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
1053 1052
1054/* 1053/*
@@ -1524,7 +1523,6 @@ struct aac_get_name {
1524 __le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */ 1523 __le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */
1525}; 1524};
1526 1525
1527#define CT_OK 218
1528struct aac_get_name_resp { 1526struct aac_get_name_resp {
1529 __le32 dummy0; 1527 __le32 dummy0;
1530 __le32 dummy1; 1528 __le32 dummy1;
@@ -1670,6 +1668,7 @@ extern struct aac_common aac_config;
1670#define RCV_TEMP_READINGS 0x00000025 1668#define RCV_TEMP_READINGS 0x00000025
1671#define GET_COMM_PREFERRED_SETTINGS 0x00000026 1669#define GET_COMM_PREFERRED_SETTINGS 0x00000026
1672#define IOP_RESET 0x00001000 1670#define IOP_RESET 0x00001000
1671#define IOP_RESET_ALWAYS 0x00001001
1673#define RE_INIT_ADAPTER 0x000000ee 1672#define RE_INIT_ADAPTER 0x000000ee
1674 1673
1675/* 1674/*
@@ -1788,7 +1787,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
1788int aac_fib_complete(struct fib * context); 1787int aac_fib_complete(struct fib * context);
1789#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data) 1788#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
1790struct aac_dev *aac_init_adapter(struct aac_dev *dev); 1789struct aac_dev *aac_init_adapter(struct aac_dev *dev);
1791int aac_get_config_status(struct aac_dev *dev); 1790int aac_get_config_status(struct aac_dev *dev, int commit_flag);
1792int aac_get_containers(struct aac_dev *dev); 1791int aac_get_containers(struct aac_dev *dev);
1793int aac_scsi_cmd(struct scsi_cmnd *cmd); 1792int aac_scsi_cmd(struct scsi_cmnd *cmd);
1794int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); 1793int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
@@ -1799,6 +1798,7 @@ int aac_sa_init(struct aac_dev *dev);
1799unsigned int aac_response_normal(struct aac_queue * q); 1798unsigned int aac_response_normal(struct aac_queue * q);
1800unsigned int aac_command_normal(struct aac_queue * q); 1799unsigned int aac_command_normal(struct aac_queue * q);
1801unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1800unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
1801int aac_check_health(struct aac_dev * dev);
1802int aac_command_thread(void *data); 1802int aac_command_thread(void *data);
1803int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1803int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
1804int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size); 1804int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
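
The new adapter_ioremap member of struct adapter_ops, together with the aac_adapter_ioremap() wrapper above, lets comminit.c resize the register mapping without knowing which register layout (rx, rkt or sa) the board uses; each back end registers its own callback. A rough sketch of how such a callback and its registration could look (the body shown is illustrative, not the exact rx implementation):

/* Illustrative back-end callback: size == 0 means "just unmap". */
static int aac_rx_ioremap(struct aac_dev *dev, u32 size)
{
        if (!size) {
                iounmap(dev->regs.rx);
                return 0;
        }
        dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
        return dev->base ? 0 : -1;
}

/* During controller init: */
dev->a_ops.adapter_ioremap = aac_rx_ioremap;
/* Callers stay generic: */
if (aac_adapter_ioremap(dev, new_size))
        printk(KERN_WARNING "aacraid: unable to map adapter.\n");
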
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 255421de9d1a..da1d3a9212f8 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -38,7 +38,7 @@
38#include <linux/completion.h> 38#include <linux/completion.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <linux/delay.h> 41#include <linux/delay.h> /* ssleep prototype */
42#include <linux/kthread.h> 42#include <linux/kthread.h>
43#include <asm/semaphore.h> 43#include <asm/semaphore.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
@@ -140,7 +140,8 @@ cleanup:
140 fibptr->hw_fib_pa = hw_fib_pa; 140 fibptr->hw_fib_pa = hw_fib_pa;
141 fibptr->hw_fib = hw_fib; 141 fibptr->hw_fib = hw_fib;
142 } 142 }
143 aac_fib_free(fibptr); 143 if (retval != -EINTR)
144 aac_fib_free(fibptr);
144 return retval; 145 return retval;
145} 146}
146 147
@@ -297,7 +298,7 @@ return_fib:
297 spin_unlock_irqrestore(&dev->fib_lock, flags); 298 spin_unlock_irqrestore(&dev->fib_lock, flags);
298 /* If someone killed the AIF aacraid thread, restart it */ 299 /* If someone killed the AIF aacraid thread, restart it */
299 status = !dev->aif_thread; 300 status = !dev->aif_thread;
300 if (status && dev->queues && dev->fsa_dev) { 301 if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
301 /* Be paranoid, be very paranoid! */ 302 /* Be paranoid, be very paranoid! */
302 kthread_stop(dev->thread); 303 kthread_stop(dev->thread);
303 ssleep(1); 304 ssleep(1);
@@ -621,7 +622,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
621 622
622 actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry)); 623 actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
623 if(actual_fibsize != fibsize){ // User made a mistake - should not continue 624 if(actual_fibsize != fibsize){ // User made a mistake - should not continue
624 dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n")); 625 dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
626 "Raw SRB command calculated fibsize=%d "
627 "user_srbcmd->sg.count=%d aac_srb=%d sgentry=%d "
628 "issued fibsize=%d\n",
629 actual_fibsize, user_srbcmd->sg.count,
630 sizeof(struct aac_srb), sizeof(struct sgentry),
631 fibsize));
625 rcode = -EINVAL; 632 rcode = -EINVAL;
626 goto cleanup; 633 goto cleanup;
627 } 634 }
@@ -663,6 +670,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
663 psg->count = cpu_to_le32(sg_indx+1); 670 psg->count = cpu_to_le32(sg_indx+1);
664 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); 671 status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
665 } 672 }
673 if (status == -EINTR) {
674 rcode = -EINTR;
675 goto cleanup;
676 }
666 677
667 if (status != 0){ 678 if (status != 0){
668 dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); 679 dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
@@ -696,8 +707,10 @@ cleanup:
696 for(i=0; i <= sg_indx; i++){ 707 for(i=0; i <= sg_indx; i++){
697 kfree(sg_list[i]); 708 kfree(sg_list[i]);
698 } 709 }
699 aac_fib_complete(srbfib); 710 if (rcode != -EINTR) {
700 aac_fib_free(srbfib); 711 aac_fib_complete(srbfib);
712 aac_fib_free(srbfib);
713 }
701 714
702 return rcode; 715 return rcode;
703} 716}
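
The -EINTR handling added to commctrl.c above encodes an ownership rule: when aac_fib_send() is interrupted (see the down_interruptible() change in commsup.c below), the firmware may still DMA into the FIB, so the ioctl paths must not call aac_fib_complete()/aac_fib_free() on it. A condensed view of that rule, following the hunks above:

status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize,
                      FsaNormal, 1, 1, NULL, NULL);
if (status == -EINTR) {
        rcode = -EINTR;         /* interrupted: the adapter still owns srbfib */
        goto cleanup;
}
/* ... */
cleanup:
        if (rcode != -EINTR) {  /* only reclaim the FIB once the firmware is done */
                aac_fib_complete(srbfib);
                aac_fib_free(srbfib);
        }
        return rcode;
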
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1cd3584ba7ff..d5cf8b91a0e7 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -180,7 +180,7 @@ int aac_send_shutdown(struct aac_dev * dev)
180 -2 /* Timeout silently */, 1, 180 -2 /* Timeout silently */, 1,
181 NULL, NULL); 181 NULL, NULL);
182 182
183 if (status == 0) 183 if (status >= 0)
184 aac_fib_complete(fibctx); 184 aac_fib_complete(fibctx);
185 aac_fib_free(fibctx); 185 aac_fib_free(fibctx);
186 return status; 186 return status;
@@ -307,17 +307,12 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
307 if (status[1] & AAC_OPT_NEW_COMM) 307 if (status[1] & AAC_OPT_NEW_COMM)
308 dev->new_comm_interface = dev->a_ops.adapter_send != 0; 308 dev->new_comm_interface = dev->a_ops.adapter_send != 0;
309 if (dev->new_comm_interface && (status[2] > dev->base_size)) { 309 if (dev->new_comm_interface && (status[2] > dev->base_size)) {
310 iounmap(dev->regs.sa); 310 aac_adapter_ioremap(dev, 0);
311 dev->base_size = status[2]; 311 dev->base_size = status[2];
312 dprintk((KERN_DEBUG "ioremap(%lx,%d)\n", 312 if (aac_adapter_ioremap(dev, status[2])) {
313 host->base, status[2]));
314 dev->regs.sa = ioremap(host->base, status[2]);
315 if (dev->regs.sa == NULL) {
316 /* remap failed, go back ... */ 313 /* remap failed, go back ... */
317 dev->new_comm_interface = 0; 314 dev->new_comm_interface = 0;
318 dev->regs.sa = ioremap(host->base, 315 if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
319 AAC_MIN_FOOTPRINT_SIZE);
320 if (dev->regs.sa == NULL) {
321 printk(KERN_WARNING 316 printk(KERN_WARNING
322 "aacraid: unable to map adapter.\n"); 317 "aacraid: unable to map adapter.\n");
323 return NULL; 318 return NULL;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 3f27419c66af..8734a045558e 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -40,8 +40,10 @@
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/kthread.h> 42#include <linux/kthread.h>
43#include <scsi/scsi.h>
43#include <scsi/scsi_host.h> 44#include <scsi/scsi_host.h>
44#include <scsi/scsi_device.h> 45#include <scsi/scsi_device.h>
46#include <scsi/scsi_cmnd.h>
45#include <asm/semaphore.h> 47#include <asm/semaphore.h>
46 48
47#include "aacraid.h" 49#include "aacraid.h"
@@ -464,6 +466,8 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
464 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); 466 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
465 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); 467 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
466 468
469 if (!dev->queues)
470 return -EBUSY;
467 q = &dev->queues->queue[AdapNormCmdQueue]; 471 q = &dev->queues->queue[AdapNormCmdQueue];
468 472
469 if(wait) 473 if(wait)
@@ -527,8 +531,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
527 } 531 }
528 udelay(5); 532 udelay(5);
529 } 533 }
530 } else 534 } else if (down_interruptible(&fibptr->event_wait)) {
531 down(&fibptr->event_wait); 535 spin_lock_irqsave(&fibptr->event_lock, flags);
536 if (fibptr->done == 0) {
537 fibptr->done = 2; /* Tell interrupt we aborted */
538 spin_unlock_irqrestore(&fibptr->event_lock, flags);
539 return -EINTR;
540 }
541 spin_unlock_irqrestore(&fibptr->event_lock, flags);
542 }
532 BUG_ON(fibptr->done == 0); 543 BUG_ON(fibptr->done == 0);
533 544
534 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){ 545 if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
@@ -795,7 +806,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
795 806
796 /* Sniff for container changes */ 807 /* Sniff for container changes */
797 808
798 if (!dev) 809 if (!dev || !dev->fsa_dev)
799 return; 810 return;
800 container = (u32)-1; 811 container = (u32)-1;
801 812
@@ -1022,13 +1033,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1022 if (device) { 1033 if (device) {
1023 switch (device_config_needed) { 1034 switch (device_config_needed) {
1024 case DELETE: 1035 case DELETE:
1025 scsi_remove_device(device);
1026 break;
1027 case CHANGE: 1036 case CHANGE:
1028 if (!dev->fsa_dev[container].valid) {
1029 scsi_remove_device(device);
1030 break;
1031 }
1032 scsi_rescan_device(&device->sdev_gendev); 1037 scsi_rescan_device(&device->sdev_gendev);
1033 1038
1034 default: 1039 default:
@@ -1045,6 +1050,262 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1045 1050
1046} 1051}
1047 1052
1053static int _aac_reset_adapter(struct aac_dev *aac)
1054{
1055 int index, quirks;
1056 u32 ret;
1057 int retval;
1058 struct Scsi_Host *host;
1059 struct scsi_device *dev;
1060 struct scsi_cmnd *command;
1061 struct scsi_cmnd *command_list;
1062
1063 /*
1064 * Assumptions:
1065 * - host is locked.
1066 * - in_reset is asserted, so no new i/o is getting to the
1067 * card.
1068 * - The card is dead.
1069 */
1070 host = aac->scsi_host_ptr;
1071 scsi_block_requests(host);
1072 aac_adapter_disable_int(aac);
1073 spin_unlock_irq(host->host_lock);
1074 kthread_stop(aac->thread);
1075
1076 /*
 1077	 * If the health check reports a positive value, the adapter is in a
 1078	 * known DEAD PANIC state and could be reset to `try again'.
1079 */
1080 retval = aac_adapter_check_health(aac);
1081 if (retval == 0)
1082 retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
1083 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1084 if (retval)
1085 retval = aac_adapter_sync_cmd(aac, IOP_RESET,
1086 0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1087
1088 if (retval)
1089 goto out;
1090 if (ret != 0x00000001) {
1091 retval = -ENODEV;
1092 goto out;
1093 }
1094
1095 index = aac->cardtype;
1096
1097 /*
1098 * Re-initialize the adapter, first free resources, then carefully
1099 * apply the initialization sequence to come back again. Only risk
1100 * is a change in Firmware dropping cache, it is assumed the caller
 1101	 * will ensure that i/o is quiesced and the card is flushed in that
1102 * case.
1103 */
1104 aac_fib_map_free(aac);
1105 aac->hw_fib_va = NULL;
1106 aac->hw_fib_pa = 0;
1107 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1108 aac->comm_addr = NULL;
1109 aac->comm_phys = 0;
1110 kfree(aac->queues);
1111 aac->queues = NULL;
1112 free_irq(aac->pdev->irq, aac);
1113 kfree(aac->fsa_dev);
1114 aac->fsa_dev = NULL;
1115 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
1116 if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1117 ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1118 goto out;
1119 } else {
1120 if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
1121 ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
1122 goto out;
1123 }
1124 if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1125 goto out;
1126 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
1127 if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1128 goto out;
1129 aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1130 if (IS_ERR(aac->thread)) {
1131 retval = PTR_ERR(aac->thread);
1132 goto out;
1133 }
1134 (void)aac_get_adapter_info(aac);
1135 quirks = aac_get_driver_ident(index)->quirks;
1136 if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1137 host->sg_tablesize = 34;
1138 host->max_sectors = (host->sg_tablesize * 8) + 112;
1139 }
1140 if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1141 host->sg_tablesize = 17;
1142 host->max_sectors = (host->sg_tablesize * 8) + 112;
1143 }
1144 aac_get_config_status(aac, 1);
1145 aac_get_containers(aac);
1146 /*
1147 * This is where the assumption that the Adapter is quiesced
1148 * is important.
1149 */
1150 command_list = NULL;
1151 __shost_for_each_device(dev, host) {
1152 unsigned long flags;
1153 spin_lock_irqsave(&dev->list_lock, flags);
1154 list_for_each_entry(command, &dev->cmd_list, list)
1155 if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1156 command->SCp.buffer = (struct scatterlist *)command_list;
1157 command_list = command;
1158 }
1159 spin_unlock_irqrestore(&dev->list_lock, flags);
1160 }
1161 while ((command = command_list)) {
1162 command_list = (struct scsi_cmnd *)command->SCp.buffer;
1163 command->SCp.buffer = NULL;
1164 command->result = DID_OK << 16
1165 | COMMAND_COMPLETE << 8
1166 | SAM_STAT_TASK_SET_FULL;
1167 command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1168 command->scsi_done(command);
1169 }
1170 retval = 0;
1171
1172out:
1173 aac->in_reset = 0;
1174 scsi_unblock_requests(host);
1175 spin_lock_irq(host->host_lock);
1176 return retval;
1177}
1178
1179int aac_check_health(struct aac_dev * aac)
1180{
1181 int BlinkLED;
1182 unsigned long time_now, flagv = 0;
1183 struct list_head * entry;
1184 struct Scsi_Host * host;
1185
1186 /* Extending the scope of fib_lock slightly to protect aac->in_reset */
1187 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1188 return 0;
1189
1190 if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1191 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1192 return 0; /* OK */
1193 }
1194
1195 aac->in_reset = 1;
1196
1197 /* Fake up an AIF:
1198 * aac_aifcmd.command = AifCmdEventNotify = 1
1199 * aac_aifcmd.seqnum = 0xFFFFFFFF
1200 * aac_aifcmd.data[0] = AifEnExpEvent = 23
1201 * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1202 * aac_aifcmd.data[2] = AifHighPriority = 3
1203 * aac_aifcmd.data[3] = BlinkLED
1204 */
1205
1206 time_now = jiffies/HZ;
1207 entry = aac->fib_list.next;
1208
1209 /*
1210 * For each Context that is on the
1211 * fibctxList, make a copy of the
1212 * fib, and then set the event to wake up the
1213 * thread that is waiting for it.
1214 */
1215 while (entry != &aac->fib_list) {
1216 /*
1217 * Extract the fibctx
1218 */
1219 struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1220 struct hw_fib * hw_fib;
1221 struct fib * fib;
1222 /*
1223 * Check if the queue is getting
1224 * backlogged
1225 */
1226 if (fibctx->count > 20) {
1227 /*
1228 * It's *not* jiffies folks,
1229 * but jiffies / HZ, so do not
1230 * panic ...
1231 */
1232 u32 time_last = fibctx->jiffies;
1233 /*
1234 * Has it been > 2 minutes
1235 * since the last read off
1236 * the queue?
1237 */
1238 if ((time_now - time_last) > aif_timeout) {
1239 entry = entry->next;
1240 aac_close_fib_context(aac, fibctx);
1241 continue;
1242 }
1243 }
1244 /*
1245 * Warning: no sleep allowed while
1246 * holding spinlock
1247 */
1248 hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1249 fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
1250 if (fib && hw_fib) {
1251 struct aac_aifcmd * aif;
1252
1253 memset(hw_fib, 0, sizeof(struct hw_fib));
1254 memset(fib, 0, sizeof(struct fib));
1255 fib->hw_fib = hw_fib;
1256 fib->dev = aac;
1257 aac_fib_init(fib);
1258 fib->type = FSAFS_NTC_FIB_CONTEXT;
1259 fib->size = sizeof (struct fib);
1260 fib->data = hw_fib->data;
1261 aif = (struct aac_aifcmd *)hw_fib->data;
1262 aif->command = cpu_to_le32(AifCmdEventNotify);
1263 aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1264 aif->data[0] = cpu_to_le32(AifEnExpEvent);
1265 aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
1266 aif->data[2] = cpu_to_le32(AifHighPriority);
1267 aif->data[3] = cpu_to_le32(BlinkLED);
1268
1269 /*
1270 * Put the FIB onto the
1271 * fibctx's fibs
1272 */
1273 list_add_tail(&fib->fiblink, &fibctx->fib_list);
1274 fibctx->count++;
1275 /*
1276 * Set the event to wake up the
1277 * thread that is waiting.
1278 */
1279 up(&fibctx->wait_sem);
1280 } else {
1281 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1282 kfree(fib);
1283 kfree(hw_fib);
1284 }
1285 entry = entry->next;
1286 }
1287
1288 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1289
1290 if (BlinkLED < 0) {
1291 printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1292 goto out;
1293 }
1294
1295 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1296
1297 host = aac->scsi_host_ptr;
1298 spin_lock_irqsave(host->host_lock, flagv);
1299 BlinkLED = _aac_reset_adapter(aac);
1300 spin_unlock_irqrestore(host->host_lock, flagv);
1301 return BlinkLED;
1302
1303out:
1304 aac->in_reset = 0;
1305 return BlinkLED;
1306}
1307
1308
1048/** 1309/**
1049 * aac_command_thread - command processing thread 1310 * aac_command_thread - command processing thread
1050 * @dev: Adapter to monitor 1311 * @dev: Adapter to monitor
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index b2a5c7262f36..8335f07b7720 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -124,10 +124,15 @@ unsigned int aac_response_normal(struct aac_queue * q)
124 } else { 124 } else {
125 unsigned long flagv; 125 unsigned long flagv;
126 spin_lock_irqsave(&fib->event_lock, flagv); 126 spin_lock_irqsave(&fib->event_lock, flagv);
127 fib->done = 1; 127 if (!fib->done)
128 fib->done = 1;
128 up(&fib->event_wait); 129 up(&fib->event_wait);
129 spin_unlock_irqrestore(&fib->event_lock, flagv); 130 spin_unlock_irqrestore(&fib->event_lock, flagv);
130 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 131 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
132 if (fib->done == 2) {
133 aac_fib_complete(fib);
134 aac_fib_free(fib);
135 }
131 } 136 }
132 consumed++; 137 consumed++;
133 spin_lock_irqsave(q->lock, flags); 138 spin_lock_irqsave(q->lock, flags);
@@ -316,7 +321,8 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
316 unsigned long flagv; 321 unsigned long flagv;
317 dprintk((KERN_INFO "event_wait up\n")); 322 dprintk((KERN_INFO "event_wait up\n"));
318 spin_lock_irqsave(&fib->event_lock, flagv); 323 spin_lock_irqsave(&fib->event_lock, flagv);
319 fib->done = 1; 324 if (!fib->done)
325 fib->done = 1;
320 up(&fib->event_wait); 326 up(&fib->event_wait);
321 spin_unlock_irqrestore(&fib->event_lock, flagv); 327 spin_unlock_irqrestore(&fib->event_lock, flagv);
322 FIB_COUNTER_INCREMENT(aac_config.NormalRecved); 328 FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e42a479ce64a..359e7ddfdb47 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -82,6 +82,8 @@ static LIST_HEAD(aac_devices);
82static int aac_cfg_major = -1; 82static int aac_cfg_major = -1;
83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; 83char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
84 84
85extern int expose_physicals;
86
85/* 87/*
86 * Because of the way Linux names scsi devices, the order in this table has 88 * Because of the way Linux names scsi devices, the order in this table has
87 * become important. Check for on-board Raid first, add-in cards second. 89 * become important. Check for on-board Raid first, add-in cards second.
@@ -394,6 +396,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
394 sdev->skip_ms_page_3f = 1; 396 sdev->skip_ms_page_3f = 1;
395 } 397 }
396 if ((sdev->type == TYPE_DISK) && 398 if ((sdev->type == TYPE_DISK) &&
399 !expose_physicals &&
397 (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 400 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
398 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 401 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
399 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) 402 if (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
@@ -454,17 +457,17 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
454 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n", 457 printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
455 AAC_DRIVERNAME); 458 AAC_DRIVERNAME);
456 aac = (struct aac_dev *)host->hostdata; 459 aac = (struct aac_dev *)host->hostdata;
457 if (aac_adapter_check_health(aac)) { 460
458 printk(KERN_ERR "%s: Host adapter appears dead\n", 461 if ((count = aac_check_health(aac)))
459 AAC_DRIVERNAME); 462 return count;
460 return -ENODEV;
461 }
462 /* 463 /*
463 * Wait for all commands to complete to this specific 464 * Wait for all commands to complete to this specific
464 * target (block maximum 60 seconds). 465 * target (block maximum 60 seconds).
465 */ 466 */
466 for (count = 60; count; --count) { 467 for (count = 60; count; --count) {
467 int active = 0; 468 int active = aac->in_reset;
469
470 if (active == 0)
468 __shost_for_each_device(dev, host) { 471 __shost_for_each_device(dev, host) {
469 spin_lock_irqsave(&dev->list_lock, flags); 472 spin_lock_irqsave(&dev->list_lock, flags);
470 list_for_each_entry(command, &dev->cmd_list, list) { 473 list_for_each_entry(command, &dev->cmd_list, list) {
@@ -864,13 +867,6 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
864 * Map in the registers from the adapter. 867 * Map in the registers from the adapter.
865 */ 868 */
866 aac->base_size = AAC_MIN_FOOTPRINT_SIZE; 869 aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
867 if ((aac->regs.sa = ioremap(
868 (unsigned long)aac->scsi_host_ptr->base, AAC_MIN_FOOTPRINT_SIZE))
869 == NULL) {
870 printk(KERN_WARNING "%s: unable to map adapter.\n",
871 AAC_DRIVERNAME);
872 goto out_free_fibs;
873 }
874 if ((*aac_drivers[index].init)(aac)) 870 if ((*aac_drivers[index].init)(aac))
875 goto out_unmap; 871 goto out_unmap;
876 872
@@ -928,12 +924,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
928 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL) 924 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
929 * physical channels are address by their actual physical number+1 925 * physical channels are address by their actual physical number+1
930 */ 926 */
931 if (aac->nondasd_support == 1) 927 if ((aac->nondasd_support == 1) || expose_physicals)
932 shost->max_channel = aac->maximum_num_channels; 928 shost->max_channel = aac->maximum_num_channels;
933 else 929 else
934 shost->max_channel = 0; 930 shost->max_channel = 0;
935 931
936 aac_get_config_status(aac); 932 aac_get_config_status(aac, 0);
937 aac_get_containers(aac); 933 aac_get_containers(aac);
938 list_add(&aac->entry, insert); 934 list_add(&aac->entry, insert);
939 935
@@ -969,8 +965,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
969 aac_fib_map_free(aac); 965 aac_fib_map_free(aac);
970 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); 966 pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
971 kfree(aac->queues); 967 kfree(aac->queues);
972 iounmap(aac->regs.sa); 968 aac_adapter_ioremap(aac, 0);
973 out_free_fibs:
974 kfree(aac->fibs); 969 kfree(aac->fibs);
975 kfree(aac->fsa_dev); 970 kfree(aac->fsa_dev);
976 out_free_host: 971 out_free_host:
@@ -1005,7 +1000,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
1005 kfree(aac->queues); 1000 kfree(aac->queues);
1006 1001
1007 free_irq(pdev->irq, aac); 1002 free_irq(pdev->irq, aac);
1008 iounmap(aac->regs.sa); 1003 aac_adapter_ioremap(aac, 0);
1009 1004
1010 kfree(aac->fibs); 1005 kfree(aac->fibs);
1011 kfree(aac->fsa_dev); 1006 kfree(aac->fsa_dev);
@@ -1013,6 +1008,10 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
1013 list_del(&aac->entry); 1008 list_del(&aac->entry);
1014 scsi_host_put(shost); 1009 scsi_host_put(shost);
1015 pci_disable_device(pdev); 1010 pci_disable_device(pdev);
1011 if (list_empty(&aac_devices)) {
1012 unregister_chrdev(aac_cfg_major, "aac");
1013 aac_cfg_major = -1;
1014 }
1016} 1015}
1017 1016
1018static struct pci_driver aac_pci_driver = { 1017static struct pci_driver aac_pci_driver = {
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 458ea897fd72..643f23b5ded8 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -28,370 +28,27 @@
28 * 28 *
29 */ 29 */
30 30
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/types.h>
34#include <linux/sched.h>
35#include <linux/pci.h>
36#include <linux/spinlock.h>
37#include <linux/slab.h>
38#include <linux/blkdev.h> 31#include <linux/blkdev.h>
39#include <linux/delay.h>
40#include <linux/completion.h>
41#include <linux/time.h>
42#include <linux/interrupt.h>
43#include <asm/semaphore.h>
44 32
45#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
46 34
47#include "aacraid.h" 35#include "aacraid.h"
48 36
49static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
50{
51 struct aac_dev *dev = dev_id;
52
53 if (dev->new_comm_interface) {
54 u32 Index = rkt_readl(dev, MUnit.OutboundQueue);
55 if (Index == 0xFFFFFFFFL)
56 Index = rkt_readl(dev, MUnit.OutboundQueue);
57 if (Index != 0xFFFFFFFFL) {
58 do {
59 if (aac_intr_normal(dev, Index)) {
60 rkt_writel(dev, MUnit.OutboundQueue, Index);
61 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespReady);
62 }
63 Index = rkt_readl(dev, MUnit.OutboundQueue);
64 } while (Index != 0xFFFFFFFFL);
65 return IRQ_HANDLED;
66 }
67 } else {
68 unsigned long bellbits;
69 u8 intstat;
70 intstat = rkt_readb(dev, MUnit.OISR);
71 /*
72 * Read mask and invert because drawbridge is reversed.
73 * This allows us to only service interrupts that have
74 * been enabled.
75 * Check to see if this is our interrupt. If it isn't just return
76 */
77 if (intstat & ~(dev->OIMR))
78 {
79 bellbits = rkt_readl(dev, OutboundDoorbellReg);
80 if (bellbits & DoorBellPrintfReady) {
81 aac_printf(dev, rkt_readl (dev, IndexRegs.Mailbox[5]));
82 rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
83 rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
84 }
85 else if (bellbits & DoorBellAdapterNormCmdReady) {
86 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
87 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
88// rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
89 }
90 else if (bellbits & DoorBellAdapterNormRespReady) {
91 rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
92 aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
93 }
94 else if (bellbits & DoorBellAdapterNormCmdNotFull) {
95 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
96 }
97 else if (bellbits & DoorBellAdapterNormRespNotFull) {
98 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
99 rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
100 }
101 return IRQ_HANDLED;
102 }
103 }
104 return IRQ_NONE;
105}
106
107/**
108 * aac_rkt_disable_interrupt - Disable interrupts
109 * @dev: Adapter
110 */
111
112static void aac_rkt_disable_interrupt(struct aac_dev *dev)
113{
114 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
115}
116
117/** 37/**
118 * rkt_sync_cmd - send a command and wait 38 * aac_rkt_ioremap
119 * @dev: Adapter 39 * @size: mapping resize request
120 * @command: Command to execute
121 * @p1: first parameter
122 * @ret: adapter status
123 * 40 *
124 * This routine will send a synchronous command to the adapter and wait
125 * for its completion.
126 */ 41 */
127 42static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
128static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
129 u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
130 u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
131{ 43{
132 unsigned long start; 44 if (!size) {
133 int ok; 45 iounmap(dev->regs.rkt);
134 /* 46 return 0;
135 * Write the command into Mailbox 0
136 */
137 rkt_writel(dev, InboundMailbox0, command);
138 /*
139 * Write the parameters into Mailboxes 1 - 6
140 */
141 rkt_writel(dev, InboundMailbox1, p1);
142 rkt_writel(dev, InboundMailbox2, p2);
143 rkt_writel(dev, InboundMailbox3, p3);
144 rkt_writel(dev, InboundMailbox4, p4);
145 /*
146 * Clear the synch command doorbell to start on a clean slate.
147 */
148 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
149 /*
150 * Disable doorbell interrupts
151 */
152 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
153 /*
154 * Force the completion of the mask register write before issuing
155 * the interrupt.
156 */
157 rkt_readb (dev, MUnit.OIMR);
158 /*
159 * Signal that there is a new synch command
160 */
161 rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
162
163 ok = 0;
164 start = jiffies;
165
166 /*
167 * Wait up to 30 seconds
168 */
169 while (time_before(jiffies, start+30*HZ))
170 {
171 udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
172 /*
173 * Mon960 will set doorbell0 bit when it has completed the command.
174 */
175 if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
176 /*
177 * Clear the doorbell.
178 */
179 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
180 ok = 1;
181 break;
182 }
183 /*
184 * Yield the processor in case we are slow
185 */
186 msleep(1);
187 } 47 }
188 if (ok != 1) { 48 dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size);
189 /* 49 if (dev->base == NULL)
190 * Restore interrupt mask even though we timed out
191 */
192 if (dev->new_comm_interface)
193 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
194 else
195 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
196 return -ETIMEDOUT;
197 }
198 /*
199 * Pull the synch status from Mailbox 0.
200 */
201 if (status)
202 *status = rkt_readl(dev, IndexRegs.Mailbox[0]);
203 if (r1)
204 *r1 = rkt_readl(dev, IndexRegs.Mailbox[1]);
205 if (r2)
206 *r2 = rkt_readl(dev, IndexRegs.Mailbox[2]);
207 if (r3)
208 *r3 = rkt_readl(dev, IndexRegs.Mailbox[3]);
209 if (r4)
210 *r4 = rkt_readl(dev, IndexRegs.Mailbox[4]);
211 /*
212 * Clear the synch command doorbell.
213 */
214 rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
215 /*
216 * Restore interrupt mask
217 */
218 if (dev->new_comm_interface)
219 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
220 else
221 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
222 return 0;
223
224}
225
226/**
227 * aac_rkt_interrupt_adapter - interrupt adapter
228 * @dev: Adapter
229 *
230 * Send an interrupt to the i960 and breakpoint it.
231 */
232
233static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
234{
235 rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
236 NULL, NULL, NULL, NULL, NULL);
237}
238
239/**
240 * aac_rkt_notify_adapter - send an event to the adapter
241 * @dev: Adapter
242 * @event: Event to send
243 *
244 * Notify the i960 that something it probably cares about has
245 * happened.
246 */
247
248static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
249{
250 switch (event) {
251
252 case AdapNormCmdQue:
253 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
254 break;
255 case HostNormRespNotFull:
256 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
257 break;
258 case AdapNormRespQue:
259 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
260 break;
261 case HostNormCmdNotFull:
262 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
263 break;
264 case HostShutdown:
265// rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
266// NULL, NULL, NULL, NULL, NULL);
267 break;
268 case FastIo:
269 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
270 break;
271 case AdapPrintfDone:
272 rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
273 break;
274 default:
275 BUG();
276 break;
277 }
278}
279
280/**
281 * aac_rkt_start_adapter - activate adapter
282 * @dev: Adapter
283 *
284 * Start up processing on an i960 based AAC adapter
285 */
286
287static void aac_rkt_start_adapter(struct aac_dev *dev)
288{
289 struct aac_init *init;
290
291 init = dev->init;
292 init->HostElapsedSeconds = cpu_to_le32(get_seconds());
293 // We can only use a 32 bit address here
294 rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
295 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
296}
297
298/**
299 * aac_rkt_check_health
300 * @dev: device to check if healthy
301 *
302 * Will attempt to determine if the specified adapter is alive and
303 * capable of handling requests, returning 0 if alive.
304 */
305static int aac_rkt_check_health(struct aac_dev *dev)
306{
307 u32 status = rkt_readl(dev, MUnit.OMRx[0]);
308
309 /*
310 * Check to see if the board failed any self tests.
311 */
312 if (status & SELF_TEST_FAILED)
313 return -1; 50 return -1;
314 /* 51 dev->IndexRegs = &dev->regs.rkt->IndexRegs;
315 * Check to see if the board panic'd.
316 */
317 if (status & KERNEL_PANIC) {
318 char * buffer;
319 struct POSTSTATUS {
320 __le32 Post_Command;
321 __le32 Post_Address;
322 } * post;
323 dma_addr_t paddr, baddr;
324 int ret;
325
326 if ((status & 0xFF000000L) == 0xBC000000L)
327 return (status >> 16) & 0xFF;
328 buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
329 ret = -2;
330 if (buffer == NULL)
331 return ret;
332 post = pci_alloc_consistent(dev->pdev,
333 sizeof(struct POSTSTATUS), &paddr);
334 if (post == NULL) {
335 pci_free_consistent(dev->pdev, 512, buffer, baddr);
336 return ret;
337 }
338 memset(buffer, 0, 512);
339 post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
340 post->Post_Address = cpu_to_le32(baddr);
341 rkt_writel(dev, MUnit.IMRx[0], paddr);
342 rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
343 NULL, NULL, NULL, NULL, NULL);
344 pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
345 post, paddr);
346 if ((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X'))) {
347 ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
348 ret <<= 4;
349 ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
350 }
351 pci_free_consistent(dev->pdev, 512, buffer, baddr);
352 return ret;
353 }
354 /*
355 * Wait for the adapter to be up and running.
356 */
357 if (!(status & KERNEL_UP_AND_RUNNING))
358 return -3;
359 /*
360 * Everything is OK
361 */
362 return 0;
363}
364
365/**
366 * aac_rkt_send
367 * @fib: fib to issue
368 *
369 * Will send a fib, returning 0 if successful.
370 */
371static int aac_rkt_send(struct fib * fib)
372{
373 u64 addr = fib->hw_fib_pa;
374 struct aac_dev *dev = fib->dev;
375 volatile void __iomem *device = dev->regs.rkt;
376 u32 Index;
377
378 dprintk((KERN_DEBUG "%p->aac_rkt_send(%p->%llx)\n", dev, fib, addr));
379 Index = rkt_readl(dev, MUnit.InboundQueue);
380 if (Index == 0xFFFFFFFFL)
381 Index = rkt_readl(dev, MUnit.InboundQueue);
382 dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
383 if (Index == 0xFFFFFFFFL)
384 return Index;
385 device += Index;
386 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
387 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
388 writel((u32)(addr & 0xffffffff), device);
389 device += sizeof(u32);
390 writel((u32)(addr >> 32), device);
391 device += sizeof(u32);
392 writel(le16_to_cpu(fib->hw_fib->header.Size), device);
393 rkt_writel(dev, MUnit.InboundQueue, Index);
394 dprintk((KERN_DEBUG "aac_rkt_send - return 0\n"));
395 return 0; 52 return 0;
396} 53}
397 54
@@ -406,78 +63,18 @@ static int aac_rkt_send(struct fib * fib)
406 63
407int aac_rkt_init(struct aac_dev *dev) 64int aac_rkt_init(struct aac_dev *dev)
408{ 65{
409 unsigned long start; 66 int retval;
410 unsigned long status; 67 extern int _aac_rx_init(struct aac_dev *dev);
411 int instance; 68 extern void aac_rx_start_adapter(struct aac_dev *dev);
412 const char * name;
413
414 instance = dev->id;
415 name = dev->name;
416 69
417 /* 70 /*
418 * Check to see if the board panic'd while booting.
419 */
420 /*
421 * Check to see if the board failed any self tests.
422 */
423 if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
424 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
425 goto error_iounmap;
426 }
427 /*
428 * Check to see if the monitor panic'd while booting.
429 */
430 if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
431 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
432 goto error_iounmap;
433 }
434 /*
435 * Check to see if the board panic'd while booting.
436 */
437 if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
438 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
439 goto error_iounmap;
440 }
441 start = jiffies;
442 /*
443 * Wait for the adapter to be up and running. Wait up to 3 minutes
444 */
445 while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
446 {
447 if(time_after(jiffies, start+startup_timeout*HZ))
448 {
449 status = rkt_readl(dev, MUnit.OMRx[0]);
450 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
451 dev->name, instance, status);
452 goto error_iounmap;
453 }
454 msleep(1);
455 }
456 if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, IRQF_SHARED|IRQF_DISABLED, "aacraid", (void *)dev)<0)
457 {
458 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
459 goto error_iounmap;
460 }
461 /*
462 * Fill in the function dispatch table. 71 * Fill in the function dispatch table.
463 */ 72 */
464 dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter; 73 dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
465 dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
466 dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
467 dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
468 dev->a_ops.adapter_check_health = aac_rkt_check_health;
469 dev->a_ops.adapter_send = aac_rkt_send;
470
471 /*
472 * First clear out all interrupts. Then enable the one's that we
473 * can handle.
474 */
475 rkt_writeb(dev, MUnit.OIMR, 0xff);
476 rkt_writel(dev, MUnit.ODR, 0xffffffff);
477 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
478 74
479 if (aac_init_adapter(dev) == NULL) 75 retval = _aac_rx_init(dev);
480 goto error_irq; 76 if (retval)
77 return retval;
481 if (dev->new_comm_interface) { 78 if (dev->new_comm_interface) {
482 /* 79 /*
483 * FIB Setup has already been done, but we can minimize the 80 * FIB Setup has already been done, but we can minimize the
@@ -494,20 +91,11 @@ int aac_rkt_init(struct aac_dev *dev)
494 dev->init->MaxIoCommands = cpu_to_le32(246); 91 dev->init->MaxIoCommands = cpu_to_le32(246);
495 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB; 92 dev->scsi_host_ptr->can_queue = 246 - AAC_NUM_MGT_FIB;
496 } 93 }
497 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
498 } 94 }
499 /* 95 /*
500 * Tell the adapter that all is configured, and it can start 96 * Tell the adapter that all is configured, and it can start
501 * accepting requests 97 * accepting requests
502 */ 98 */
503 aac_rkt_start_adapter(dev); 99 aac_rx_start_adapter(dev);
504 return 0; 100 return 0;
505
506error_irq:
507 rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
508 free_irq(dev->scsi_host_ptr->irq, (void *)dev);
509
510error_iounmap:
511
512 return -1;
513} 101}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 035018db69b1..a1d214d770eb 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -79,7 +79,7 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
79 { 79 {
80 bellbits = rx_readl(dev, OutboundDoorbellReg); 80 bellbits = rx_readl(dev, OutboundDoorbellReg);
81 if (bellbits & DoorBellPrintfReady) { 81 if (bellbits & DoorBellPrintfReady) {
82 aac_printf(dev, rx_readl (dev, IndexRegs.Mailbox[5])); 82 aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady); 83 rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone); 84 rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
85 } 85 }
@@ -134,14 +134,14 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
134 /* 134 /*
135 * Write the command into Mailbox 0 135 * Write the command into Mailbox 0
136 */ 136 */
137 rx_writel(dev, InboundMailbox0, command); 137 writel(command, &dev->IndexRegs->Mailbox[0]);
138 /* 138 /*
139 * Write the parameters into Mailboxes 1 - 6 139 * Write the parameters into Mailboxes 1 - 6
140 */ 140 */
141 rx_writel(dev, InboundMailbox1, p1); 141 writel(p1, &dev->IndexRegs->Mailbox[1]);
142 rx_writel(dev, InboundMailbox2, p2); 142 writel(p2, &dev->IndexRegs->Mailbox[2]);
143 rx_writel(dev, InboundMailbox3, p3); 143 writel(p3, &dev->IndexRegs->Mailbox[3]);
144 rx_writel(dev, InboundMailbox4, p4); 144 writel(p4, &dev->IndexRegs->Mailbox[4]);
145 /* 145 /*
146 * Clear the synch command doorbell to start on a clean slate. 146 * Clear the synch command doorbell to start on a clean slate.
147 */ 147 */
@@ -199,15 +199,15 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
199 * Pull the synch status from Mailbox 0. 199 * Pull the synch status from Mailbox 0.
200 */ 200 */
201 if (status) 201 if (status)
202 *status = rx_readl(dev, IndexRegs.Mailbox[0]); 202 *status = readl(&dev->IndexRegs->Mailbox[0]);
203 if (r1) 203 if (r1)
204 *r1 = rx_readl(dev, IndexRegs.Mailbox[1]); 204 *r1 = readl(&dev->IndexRegs->Mailbox[1]);
205 if (r2) 205 if (r2)
206 *r2 = rx_readl(dev, IndexRegs.Mailbox[2]); 206 *r2 = readl(&dev->IndexRegs->Mailbox[2]);
207 if (r3) 207 if (r3)
208 *r3 = rx_readl(dev, IndexRegs.Mailbox[3]); 208 *r3 = readl(&dev->IndexRegs->Mailbox[3]);
209 if (r4) 209 if (r4)
210 *r4 = rx_readl(dev, IndexRegs.Mailbox[4]); 210 *r4 = readl(&dev->IndexRegs->Mailbox[4]);
211 /* 211 /*
212 * Clear the synch command doorbell. 212 * Clear the synch command doorbell.
213 */ 213 */
@@ -261,8 +261,6 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
261 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3); 261 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
262 break; 262 break;
263 case HostShutdown: 263 case HostShutdown:
264// rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
265// NULL, NULL, NULL, NULL, NULL);
266 break; 264 break;
267 case FastIo: 265 case FastIo:
268 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6); 266 rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
@@ -283,7 +281,7 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
283 * Start up processing on an i960 based AAC adapter 281 * Start up processing on an i960 based AAC adapter
284 */ 282 */
285 283
286static void aac_rx_start_adapter(struct aac_dev *dev) 284void aac_rx_start_adapter(struct aac_dev *dev)
287{ 285{
288 struct aac_init *init; 286 struct aac_init *init;
289 287
@@ -381,7 +379,7 @@ static int aac_rx_send(struct fib * fib)
381 dprintk((KERN_DEBUG "Index = 0x%x\n", Index)); 379 dprintk((KERN_DEBUG "Index = 0x%x\n", Index));
382 if (Index == 0xFFFFFFFFL) 380 if (Index == 0xFFFFFFFFL)
383 return Index; 381 return Index;
384 device += Index; 382 device = dev->base + Index;
385 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff), 383 dprintk((KERN_DEBUG "entry = %x %x %u\n", (u32)(addr & 0xffffffff),
386 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size))); 384 (u32)(addr >> 32), (u32)le16_to_cpu(fib->hw_fib->header.Size)));
387 writel((u32)(addr & 0xffffffff), device); 385 writel((u32)(addr & 0xffffffff), device);
@@ -395,6 +393,43 @@ static int aac_rx_send(struct fib * fib)
395} 393}
396 394
397/** 395/**
396 * aac_rx_ioremap
397 * @size: mapping resize request
398 *
399 */
400static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
401{
402 if (!size) {
403 iounmap(dev->regs.rx);
404 return 0;
405 }
406 dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
407 if (dev->base == NULL)
408 return -1;
409 dev->IndexRegs = &dev->regs.rx->IndexRegs;
410 return 0;
411}
412
413static int aac_rx_restart_adapter(struct aac_dev *dev)
414{
415 u32 var;
416
417 printk(KERN_ERR "%s%d: adapter kernel panic'd.\n",
418 dev->name, dev->id);
419
420 if (aac_rx_check_health(dev) <= 0)
421 return 1;
422 if (rx_sync_cmd(dev, IOP_RESET, 0, 0, 0, 0, 0, 0,
423 &var, NULL, NULL, NULL, NULL))
424 return 1;
425 if (var != 0x00000001)
426 return 1;
427 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
428 return 1;
429 return 0;
430}
431
432/**
398 * aac_rx_init - initialize an i960 based AAC card 433 * aac_rx_init - initialize an i960 based AAC card
399 * @dev: device to configure 434 * @dev: device to configure
400 * 435 *
@@ -403,7 +438,7 @@ static int aac_rx_send(struct fib * fib)
403 * to the comm region. 438 * to the comm region.
404 */ 439 */
405 440
406int aac_rx_init(struct aac_dev *dev) 441int _aac_rx_init(struct aac_dev *dev)
407{ 442{
408 unsigned long start; 443 unsigned long start;
409 unsigned long status; 444 unsigned long status;
@@ -413,27 +448,30 @@ int aac_rx_init(struct aac_dev *dev)
413 instance = dev->id; 448 instance = dev->id;
414 name = dev->name; 449 name = dev->name;
415 450
451 if (aac_adapter_ioremap(dev, dev->base_size)) {
452 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
453 goto error_iounmap;
454 }
455
416 /* 456 /*
417 * Check to see if the board panic'd while booting. 457 * Check to see if the board panic'd while booting.
418 */ 458 */
459 status = rx_readl(dev, MUnit.OMRx[0]);
460 if (status & KERNEL_PANIC)
461 if (aac_rx_restart_adapter(dev))
462 goto error_iounmap;
419 /* 463 /*
420 * Check to see if the board failed any self tests. 464 * Check to see if the board failed any self tests.
421 */ 465 */
422 if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) { 466 status = rx_readl(dev, MUnit.OMRx[0]);
467 if (status & SELF_TEST_FAILED) {
423 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance); 468 printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
424 goto error_iounmap; 469 goto error_iounmap;
425 } 470 }
426 /* 471 /*
427 * Check to see if the board panic'd while booting.
428 */
429 if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
430 printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
431 goto error_iounmap;
432 }
433 /*
434 * Check to see if the monitor panic'd while booting. 472 * Check to see if the monitor panic'd while booting.
435 */ 473 */
436 if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) { 474 if (status & MONITOR_PANIC) {
437 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance); 475 printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
438 goto error_iounmap; 476 goto error_iounmap;
439 } 477 }
@@ -441,12 +479,10 @@ int aac_rx_init(struct aac_dev *dev)
441 /* 479 /*
442 * Wait for the adapter to be up and running. Wait up to 3 minutes 480 * Wait for the adapter to be up and running. Wait up to 3 minutes
443 */ 481 */
444 while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING)) 482 while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
445 || (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
446 { 483 {
447 if(time_after(jiffies, start+startup_timeout*HZ)) 484 if(time_after(jiffies, start+startup_timeout*HZ))
448 { 485 {
449 status = rx_readl(dev, IndexRegs.Mailbox[7]);
450 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 486 printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
451 dev->name, instance, status); 487 dev->name, instance, status);
452 goto error_iounmap; 488 goto error_iounmap;
@@ -481,11 +517,6 @@ int aac_rx_init(struct aac_dev *dev)
481 if (dev->new_comm_interface) 517 if (dev->new_comm_interface)
482 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7); 518 rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
483 519
484 /*
485 * Tell the adapter that all is configured, and it can start
486 * accepting requests
487 */
488 aac_rx_start_adapter(dev);
489 return 0; 520 return 0;
490 521
491error_irq: 522error_irq:
@@ -496,3 +527,23 @@ error_iounmap:
496 527
497 return -1; 528 return -1;
498} 529}
530
531int aac_rx_init(struct aac_dev *dev)
532{
533 int retval;
534
535 /*
536 * Fill in the function dispatch table.
537 */
538 dev->a_ops.adapter_ioremap = aac_rx_ioremap;
539
540 retval = _aac_rx_init(dev);
541 if (!retval) {
542 /*
543 * Tell the adapter that all is configured, and it can
544 * start accepting requests
545 */
546 aac_rx_start_adapter(dev);
547 }
548 return retval;
549}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index cd586cc8f9be..f906ead239dd 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -281,6 +281,21 @@ static int aac_sa_check_health(struct aac_dev *dev)
281} 281}
282 282
283/** 283/**
284 * aac_sa_ioremap
285 * @size: mapping resize request
286 *
287 */
288static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
289{
290 if (!size) {
291 iounmap(dev->regs.sa);
292 return 0;
293 }
294 dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size);
295 return (dev->base == NULL) ? -1 : 0;
296}
297
298/**
284 * aac_sa_init - initialize an ARM based AAC card 299 * aac_sa_init - initialize an ARM based AAC card
285 * @dev: device to configure 300 * @dev: device to configure
286 * 301 *
@@ -299,6 +314,11 @@ int aac_sa_init(struct aac_dev *dev)
299 instance = dev->id; 314 instance = dev->id;
300 name = dev->name; 315 name = dev->name;
301 316
317 if (aac_sa_ioremap(dev, dev->base_size)) {
318 printk(KERN_WARNING "%s: unable to map adapter.\n", name);
319 goto error_iounmap;
320 }
321
302 /* 322 /*
303 * Check to see if the board failed any self tests. 323 * Check to see if the board failed any self tests.
304 */ 324 */
@@ -341,6 +361,7 @@ int aac_sa_init(struct aac_dev *dev)
341 dev->a_ops.adapter_notify = aac_sa_notify_adapter; 361 dev->a_ops.adapter_notify = aac_sa_notify_adapter;
342 dev->a_ops.adapter_sync_cmd = sa_sync_cmd; 362 dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
343 dev->a_ops.adapter_check_health = aac_sa_check_health; 363 dev->a_ops.adapter_check_health = aac_sa_check_health;
364 dev->a_ops.adapter_ioremap = aac_sa_ioremap;
344 365
345 /* 366 /*
346 * First clear out all interrupts. Then enable the one's that 367 * First clear out all interrupts. Then enable the one's that
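The rkt.c, rx.c and sa.c hunks above stop calling ioremap()/iounmap() directly and instead go through per-adapter aac_*_ioremap() callbacks, which linit.c reaches via aac_adapter_ioremap(). The dispatch itself lives in aacraid.h, which is not part of this diff, so the sketch below is only an assumption of how that wrapper could look (struct adapter_ops_sketch and the macro body are illustrative names, not quotes from the header):

#include <linux/types.h>	/* u32 */

/* Minimal sketch -- the real per-adapter function table is declared in aacraid.h. */
struct aac_dev;			/* full definition lives in aacraid.h */

struct adapter_ops_sketch {
	/* size != 0: (re)map the register window; size == 0: unmap it */
	int (*adapter_ioremap)(struct aac_dev *dev, u32 size);
};

/* Hypothetical dispatch wrapper matching the callers in linit.c and rx.c above. */
#define aac_adapter_ioremap(dev, size) \
	((dev)->a_ops.adapter_ioremap(dev, size))

With a table like this, each board family (rkt, rx, sa) fills in its own mapping routine during init, and common code such as _aac_rx_init() or aac_remove_one() no longer needs to know which register layout it is unmapping.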
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index e32b4ab2f8fb..773f02e3b10b 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -888,10 +888,6 @@ typedef unsigned char uchar;
888#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F) 888#define ASC_PCI_ID2DEV(id) (((id) >> 11) & 0x1F)
889#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7) 889#define ASC_PCI_ID2FUNC(id) (((id) >> 8) & 0x7)
890#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF)) 890#define ASC_PCI_MKID(bus, dev, func) ((((dev) & 0x1F) << 11) | (((func) & 0x7) << 8) | ((bus) & 0xFF))
891#define ASC_PCI_VENDORID 0x10CD
892#define ASC_PCI_DEVICEID_1200A 0x1100
893#define ASC_PCI_DEVICEID_1200B 0x1200
894#define ASC_PCI_DEVICEID_ULTRA 0x1300
895#define ASC_PCI_REVISION_3150 0x02 891#define ASC_PCI_REVISION_3150 0x02
896#define ASC_PCI_REVISION_3050 0x03 892#define ASC_PCI_REVISION_3050 0x03
897 893
@@ -899,6 +895,14 @@ typedef unsigned char uchar;
899#define ASC_DVCLIB_CALL_FAILED (0) 895#define ASC_DVCLIB_CALL_FAILED (0)
900#define ASC_DVCLIB_CALL_ERROR (-1) 896#define ASC_DVCLIB_CALL_ERROR (-1)
901 897
898#define PCI_VENDOR_ID_ASP 0x10cd
899#define PCI_DEVICE_ID_ASP_1200A 0x1100
900#define PCI_DEVICE_ID_ASP_ABP940 0x1200
901#define PCI_DEVICE_ID_ASP_ABP940U 0x1300
902#define PCI_DEVICE_ID_ASP_ABP940UW 0x2300
903#define PCI_DEVICE_ID_38C0800_REV1 0x2500
904#define PCI_DEVICE_ID_38C1600_REV1 0x2700
905
902/* 906/*
903 * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists. 907 * Enable CC_VERY_LONG_SG_LIST to support up to 64K element SG lists.
904 * The SRB structure will have to be changed and the ASC_SRB2SCSIQ() 908 * The SRB structure will have to be changed and the ASC_SRB2SCSIQ()
@@ -1492,8 +1496,6 @@ typedef struct asc_dvc_cfg {
1492#define ASC_INIT_STATE_END_INQUIRY 0x0080 1496#define ASC_INIT_STATE_END_INQUIRY 0x0080
1493#define ASC_INIT_RESET_SCSI_DONE 0x0100 1497#define ASC_INIT_RESET_SCSI_DONE 0x0100
1494#define ASC_INIT_STATE_WITHOUT_EEP 0x8000 1498#define ASC_INIT_STATE_WITHOUT_EEP 0x8000
1495#define ASC_PCI_DEVICE_ID_REV_A 0x1100
1496#define ASC_PCI_DEVICE_ID_REV_B 0x1200
1497#define ASC_BUG_FIX_IF_NOT_DWB 0x0001 1499#define ASC_BUG_FIX_IF_NOT_DWB 0x0001
1498#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002 1500#define ASC_BUG_FIX_ASYN_USE_SYN 0x0002
1499#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41 1501#define ASYN_SDTR_DATA_FIX_PCI_REV_AB 0x41
@@ -2100,12 +2102,6 @@ STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
2100#define ADV_NUM_PAGE_CROSSING \ 2102#define ADV_NUM_PAGE_CROSSING \
2101 ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE) 2103 ((ADV_SG_TOTAL_MEM_SIZE + (ADV_PAGE_SIZE - 1))/ADV_PAGE_SIZE)
2102 2104
2103/* a_condor.h */
2104#define ADV_PCI_VENDOR_ID 0x10CD
2105#define ADV_PCI_DEVICE_ID_REV_A 0x2300
2106#define ADV_PCI_DEVID_38C0800_REV1 0x2500
2107#define ADV_PCI_DEVID_38C1600_REV1 0x2700
2108
2109#define ADV_EEP_DVC_CFG_BEGIN (0x00) 2105#define ADV_EEP_DVC_CFG_BEGIN (0x00)
2110#define ADV_EEP_DVC_CFG_END (0x15) 2106#define ADV_EEP_DVC_CFG_END (0x15)
2111#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */ 2107#define ADV_EEP_DVC_CTL_BEGIN (0x16) /* location of OEM name */
@@ -3569,14 +3565,7 @@ typedef struct scsi_cmnd REQ, *REQP;
3569#define PCI_MAX_SLOT 0x1F 3565#define PCI_MAX_SLOT 0x1F
3570#define PCI_MAX_BUS 0xFF 3566#define PCI_MAX_BUS 0xFF
3571#define PCI_IOADDRESS_MASK 0xFFFE 3567#define PCI_IOADDRESS_MASK 0xFFFE
3572#define ASC_PCI_VENDORID 0x10CD
3573#define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. */ 3568#define ASC_PCI_DEVICE_ID_CNT 6 /* PCI Device ID count. */
3574#define ASC_PCI_DEVICE_ID_1100 0x1100
3575#define ASC_PCI_DEVICE_ID_1200 0x1200
3576#define ASC_PCI_DEVICE_ID_1300 0x1300
3577#define ASC_PCI_DEVICE_ID_2300 0x2300 /* ASC-3550 */
3578#define ASC_PCI_DEVICE_ID_2500 0x2500 /* ASC-38C0800 */
3579#define ASC_PCI_DEVICE_ID_2700 0x2700 /* ASC-38C1600 */
3580 3569
3581#ifndef ADVANSYS_STATS 3570#ifndef ADVANSYS_STATS
3582#define ASC_STATS(shp, counter) 3571#define ASC_STATS(shp, counter)
@@ -4330,12 +4319,12 @@ advansys_detect(struct scsi_host_template *tpnt)
4330 struct pci_dev *pci_devp = NULL; 4319 struct pci_dev *pci_devp = NULL;
4331 int pci_device_id_cnt = 0; 4320 int pci_device_id_cnt = 0;
4332 unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = { 4321 unsigned int pci_device_id[ASC_PCI_DEVICE_ID_CNT] = {
4333 ASC_PCI_DEVICE_ID_1100, 4322 PCI_DEVICE_ID_ASP_1200A,
4334 ASC_PCI_DEVICE_ID_1200, 4323 PCI_DEVICE_ID_ASP_ABP940,
4335 ASC_PCI_DEVICE_ID_1300, 4324 PCI_DEVICE_ID_ASP_ABP940U,
4336 ASC_PCI_DEVICE_ID_2300, 4325 PCI_DEVICE_ID_ASP_ABP940UW,
4337 ASC_PCI_DEVICE_ID_2500, 4326 PCI_DEVICE_ID_38C0800_REV1,
4338 ASC_PCI_DEVICE_ID_2700 4327 PCI_DEVICE_ID_38C1600_REV1
4339 }; 4328 };
4340 ADV_PADDR pci_memory_address; 4329 ADV_PADDR pci_memory_address;
4341#endif /* CONFIG_PCI */ 4330#endif /* CONFIG_PCI */
@@ -4471,7 +4460,7 @@ advansys_detect(struct scsi_host_template *tpnt)
4471 4460
4472 /* Find all PCI cards. */ 4461 /* Find all PCI cards. */
4473 while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) { 4462 while (pci_device_id_cnt < ASC_PCI_DEVICE_ID_CNT) {
4474 if ((pci_devp = pci_find_device(ASC_PCI_VENDORID, 4463 if ((pci_devp = pci_find_device(PCI_VENDOR_ID_ASP,
4475 pci_device_id[pci_device_id_cnt], pci_devp)) == 4464 pci_device_id[pci_device_id_cnt], pci_devp)) ==
4476 NULL) { 4465 NULL) {
4477 pci_device_id_cnt++; 4466 pci_device_id_cnt++;
@@ -4575,9 +4564,9 @@ advansys_detect(struct scsi_host_template *tpnt)
4575 */ 4564 */
4576#ifdef CONFIG_PCI 4565#ifdef CONFIG_PCI
4577 if (asc_bus[bus] == ASC_IS_PCI && 4566 if (asc_bus[bus] == ASC_IS_PCI &&
4578 (pci_devp->device == ASC_PCI_DEVICE_ID_2300 || 4567 (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW ||
4579 pci_devp->device == ASC_PCI_DEVICE_ID_2500 || 4568 pci_devp->device == PCI_DEVICE_ID_38C0800_REV1 ||
4580 pci_devp->device == ASC_PCI_DEVICE_ID_2700)) 4569 pci_devp->device == PCI_DEVICE_ID_38C1600_REV1))
4581 { 4570 {
4582 boardp->flags |= ASC_IS_WIDE_BOARD; 4571 boardp->flags |= ASC_IS_WIDE_BOARD;
4583 } 4572 }
@@ -4600,11 +4589,11 @@ advansys_detect(struct scsi_host_template *tpnt)
4600 adv_dvc_varp->isr_callback = adv_isr_callback; 4589 adv_dvc_varp->isr_callback = adv_isr_callback;
4601 adv_dvc_varp->async_callback = adv_async_callback; 4590 adv_dvc_varp->async_callback = adv_async_callback;
4602#ifdef CONFIG_PCI 4591#ifdef CONFIG_PCI
4603 if (pci_devp->device == ASC_PCI_DEVICE_ID_2300) 4592 if (pci_devp->device == PCI_DEVICE_ID_ASP_ABP940UW)
4604 { 4593 {
4605 ASC_DBG(1, "advansys_detect: ASC-3550\n"); 4594 ASC_DBG(1, "advansys_detect: ASC-3550\n");
4606 adv_dvc_varp->chip_type = ADV_CHIP_ASC3550; 4595 adv_dvc_varp->chip_type = ADV_CHIP_ASC3550;
4607 } else if (pci_devp->device == ASC_PCI_DEVICE_ID_2500) 4596 } else if (pci_devp->device == PCI_DEVICE_ID_38C0800_REV1)
4608 { 4597 {
4609 ASC_DBG(1, "advansys_detect: ASC-38C0800\n"); 4598 ASC_DBG(1, "advansys_detect: ASC-38C0800\n");
4610 adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800; 4599 adv_dvc_varp->chip_type = ADV_CHIP_ASC38C0800;
@@ -11922,7 +11911,7 @@ AscInitGetConfig(
11922 PCIRevisionID = DvcReadPCIConfigByte(asc_dvc, 11911 PCIRevisionID = DvcReadPCIConfigByte(asc_dvc,
11923 AscPCIConfigRevisionIDRegister); 11912 AscPCIConfigRevisionIDRegister);
11924 11913
11925 if (PCIVendorID != ASC_PCI_VENDORID) { 11914 if (PCIVendorID != PCI_VENDOR_ID_ASP) {
11926 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11915 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11927 } 11916 }
11928 prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc, 11917 prevCmdRegBits = DvcReadPCIConfigByte(asc_dvc,
@@ -11942,15 +11931,15 @@ AscInitGetConfig(
11942 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11931 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11943 } 11932 }
11944 } 11933 }
11945 if ((PCIDeviceID == ASC_PCI_DEVICEID_1200A) || 11934 if ((PCIDeviceID == PCI_DEVICE_ID_ASP_1200A) ||
11946 (PCIDeviceID == ASC_PCI_DEVICEID_1200B)) { 11935 (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940)) {
11947 DvcWritePCIConfigByte(asc_dvc, 11936 DvcWritePCIConfigByte(asc_dvc,
11948 AscPCIConfigLatencyTimer, 0x00); 11937 AscPCIConfigLatencyTimer, 0x00);
11949 if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer) 11938 if (DvcReadPCIConfigByte(asc_dvc, AscPCIConfigLatencyTimer)
11950 != 0x00) { 11939 != 0x00) {
11951 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE; 11940 warn_code |= ASC_WARN_SET_PCI_CONFIG_SPACE;
11952 } 11941 }
11953 } else if (PCIDeviceID == ASC_PCI_DEVICEID_ULTRA) { 11942 } else if (PCIDeviceID == PCI_DEVICE_ID_ASP_ABP940U) {
11954 if (DvcReadPCIConfigByte(asc_dvc, 11943 if (DvcReadPCIConfigByte(asc_dvc,
11955 AscPCIConfigLatencyTimer) < 0x20) { 11944 AscPCIConfigLatencyTimer) < 0x20) {
11956 DvcWritePCIConfigByte(asc_dvc, 11945 DvcWritePCIConfigByte(asc_dvc,
@@ -12037,8 +12026,8 @@ AscInitFromAscDvcVar(
12037 AscSetChipCfgMsw(iop_base, cfg_msw); 12026 AscSetChipCfgMsw(iop_base, cfg_msw);
12038 if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) { 12027 if ((asc_dvc->bus_type & ASC_IS_PCI_ULTRA) == ASC_IS_PCI_ULTRA) {
12039 } else { 12028 } else {
12040 if ((pci_device_id == ASC_PCI_DEVICE_ID_REV_A) || 12029 if ((pci_device_id == PCI_DEVICE_ID_ASP_1200A) ||
12041 (pci_device_id == ASC_PCI_DEVICE_ID_REV_B)) { 12030 (pci_device_id == PCI_DEVICE_ID_ASP_ABP940)) {
12042 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB; 12031 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_IF_NOT_DWB;
12043 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN; 12032 asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
12044 } 12033 }
@@ -14275,8 +14264,8 @@ Default_38C0800_EEPROM_Config __initdata = {
14275 0, /* 55 reserved */ 14264 0, /* 55 reserved */
14276 0, /* 56 cisptr_lsw */ 14265 0, /* 56 cisptr_lsw */
14277 0, /* 57 cisprt_msw */ 14266 0, /* 57 cisprt_msw */
14278 ADV_PCI_VENDOR_ID, /* 58 subsysvid */ 14267 PCI_VENDOR_ID_ASP, /* 58 subsysvid */
14279 ADV_PCI_DEVID_38C0800_REV1, /* 59 subsysid */ 14268 PCI_DEVICE_ID_38C0800_REV1, /* 59 subsysid */
14280 0, /* 60 reserved */ 14269 0, /* 60 reserved */
14281 0, /* 61 reserved */ 14270 0, /* 61 reserved */
14282 0, /* 62 reserved */ 14271 0, /* 62 reserved */
@@ -14405,8 +14394,8 @@ Default_38C1600_EEPROM_Config __initdata = {
14405 0, /* 55 reserved */ 14394 0, /* 55 reserved */
14406 0, /* 56 cisptr_lsw */ 14395 0, /* 56 cisptr_lsw */
14407 0, /* 57 cisprt_msw */ 14396 0, /* 57 cisprt_msw */
14408 ADV_PCI_VENDOR_ID, /* 58 subsysvid */ 14397 PCI_VENDOR_ID_ASP, /* 58 subsysvid */
14409 ADV_PCI_DEVID_38C1600_REV1, /* 59 subsysid */ 14398 PCI_DEVICE_ID_38C1600_REV1, /* 59 subsysid */
14410 0, /* 60 reserved */ 14399 0, /* 60 reserved */
14411 0, /* 61 reserved */ 14400 0, /* 61 reserved */
14412 0, /* 62 reserved */ 14401 0, /* 62 reserved */
@@ -18225,3 +18214,22 @@ AdvInquiryHandling(
18225 } 18214 }
18226} 18215}
18227MODULE_LICENSE("Dual BSD/GPL"); 18216MODULE_LICENSE("Dual BSD/GPL");
18217
18218/* PCI Devices supported by this driver */
18219static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
18220 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
18221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18222 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
18223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18224 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U,
18225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18226 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW,
18227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18228 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1,
18229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18230 { PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1,
18231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
18232 { }
18233};
18234MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
18235
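The new advansys_pci_tbl spells every field out explicitly. For comparison only (this is not what the patch does), the same wildcard entries could also be written with the PCI_DEVICE() helper from <linux/pci.h>, which fills in PCI_ANY_ID for the subsystem IDs while the remaining fields default to zero:

static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940U) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940UW) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C0800_REV1) },
	{ PCI_DEVICE(PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_38C1600_REV1) },
	{ }	/* terminating entry */
};

Either form feeds MODULE_DEVICE_TABLE(pci, ...) the same match data, so module autoloading behaves identically.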
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index f974869ea323..fb6a476eb873 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -253,6 +253,7 @@
253#include <linux/isapnp.h> 253#include <linux/isapnp.h>
254#include <linux/spinlock.h> 254#include <linux/spinlock.h>
255#include <linux/workqueue.h> 255#include <linux/workqueue.h>
256#include <linux/list.h>
256#include <asm/semaphore.h> 257#include <asm/semaphore.h>
257#include <scsi/scsicam.h> 258#include <scsi/scsicam.h>
258 259
@@ -262,6 +263,8 @@
262#include <scsi/scsi_transport_spi.h> 263#include <scsi/scsi_transport_spi.h>
263#include "aha152x.h" 264#include "aha152x.h"
264 265
266static LIST_HEAD(aha152x_host_list);
267
265 268
266/* DEFINES */ 269/* DEFINES */
267 270
@@ -423,8 +426,6 @@ MODULE_DEVICE_TABLE(isapnp, id_table);
423 426
424#endif /* !PCMCIA */ 427#endif /* !PCMCIA */
425 428
426static int registered_count=0;
427static struct Scsi_Host *aha152x_host[2];
428static struct scsi_host_template aha152x_driver_template; 429static struct scsi_host_template aha152x_driver_template;
429 430
430/* 431/*
@@ -541,6 +542,7 @@ struct aha152x_hostdata {
541#ifdef __ISAPNP__ 542#ifdef __ISAPNP__
542 struct pnp_dev *pnpdev; 543 struct pnp_dev *pnpdev;
543#endif 544#endif
545 struct list_head host_list;
544}; 546};
545 547
546 548
@@ -755,20 +757,9 @@ static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
755 return ptr; 757 return ptr;
756} 758}
757 759
758static inline struct Scsi_Host *lookup_irq(int irqno)
759{
760 int i;
761
762 for(i=0; i<ARRAY_SIZE(aha152x_host); i++)
763 if(aha152x_host[i] && aha152x_host[i]->irq==irqno)
764 return aha152x_host[i];
765
766 return NULL;
767}
768
769static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs) 760static irqreturn_t swintr(int irqno, void *dev_id, struct pt_regs *regs)
770{ 761{
771 struct Scsi_Host *shpnt = lookup_irq(irqno); 762 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
772 763
773 if (!shpnt) { 764 if (!shpnt) {
774 printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno); 765 printk(KERN_ERR "aha152x: catched software interrupt %d for unknown controller.\n", irqno);
@@ -791,10 +782,11 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
791 return NULL; 782 return NULL;
792 } 783 }
793 784
794 /* need to have host registered before triggering any interrupt */
795 aha152x_host[registered_count] = shpnt;
796
797 memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt)); 785 memset(HOSTDATA(shpnt), 0, sizeof *HOSTDATA(shpnt));
786 INIT_LIST_HEAD(&HOSTDATA(shpnt)->host_list);
787
788 /* need to have host registered before triggering any interrupt */
789 list_add_tail(&HOSTDATA(shpnt)->host_list, &aha152x_host_list);
798 790
799 shpnt->io_port = setup->io_port; 791 shpnt->io_port = setup->io_port;
800 shpnt->n_io_port = IO_RANGE; 792 shpnt->n_io_port = IO_RANGE;
@@ -907,12 +899,10 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)
907 899
908 scsi_scan_host(shpnt); 900 scsi_scan_host(shpnt);
909 901
910 registered_count++;
911
912 return shpnt; 902 return shpnt;
913 903
914out_host_put: 904out_host_put:
915 aha152x_host[registered_count]=NULL; 905 list_del(&HOSTDATA(shpnt)->host_list);
916 scsi_host_put(shpnt); 906 scsi_host_put(shpnt);
917 907
918 return NULL; 908 return NULL;
@@ -937,6 +927,7 @@ void aha152x_release(struct Scsi_Host *shpnt)
937#endif 927#endif
938 928
939 scsi_remove_host(shpnt); 929 scsi_remove_host(shpnt);
930 list_del(&HOSTDATA(shpnt)->host_list);
940 scsi_host_put(shpnt); 931 scsi_host_put(shpnt);
941} 932}
942 933
@@ -1459,9 +1450,12 @@ static struct work_struct aha152x_tq;
1459 */ 1450 */
1460static void run(void) 1451static void run(void)
1461{ 1452{
1462 int i; 1453 struct aha152x_hostdata *hd;
1463 for (i = 0; i<ARRAY_SIZE(aha152x_host); i++) { 1454
1464 is_complete(aha152x_host[i]); 1455 list_for_each_entry(hd, &aha152x_host_list, host_list) {
1456 struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
1457
1458 is_complete(shost);
1465 } 1459 }
1466} 1460}
1467 1461
@@ -1471,7 +1465,7 @@ static void run(void)
1471 */ 1465 */
1472static irqreturn_t intr(int irqno, void *dev_id, struct pt_regs *regs) 1466static irqreturn_t intr(int irqno, void *dev_id, struct pt_regs *regs)
1473{ 1467{
1474 struct Scsi_Host *shpnt = lookup_irq(irqno); 1468 struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
1475 unsigned long flags; 1469 unsigned long flags;
1476 unsigned char rev, dmacntrl0; 1470 unsigned char rev, dmacntrl0;
1477 1471
@@ -3953,16 +3947,17 @@ static int __init aha152x_init(void)
3953#endif 3947#endif
3954 } 3948 }
3955 3949
3956 return registered_count>0; 3950 return 1;
3957} 3951}
3958 3952
3959static void __exit aha152x_exit(void) 3953static void __exit aha152x_exit(void)
3960{ 3954{
3961 int i; 3955 struct aha152x_hostdata *hd;
3956
3957 list_for_each_entry(hd, &aha152x_host_list, host_list) {
3958 struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata);
3962 3959
3963 for(i=0; i<ARRAY_SIZE(setup); i++) { 3960 aha152x_release(shost);
3964 aha152x_release(aha152x_host[i]);
3965 aha152x_host[i]=NULL;
3966 } 3961 }
3967} 3962}
3968 3963
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 998999c0a972..c7eeaced324a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -321,7 +321,7 @@ MODULE_LICENSE("Dual BSD/GPL");
321MODULE_VERSION(AIC79XX_DRIVER_VERSION); 321MODULE_VERSION(AIC79XX_DRIVER_VERSION);
322module_param(aic79xx, charp, 0444); 322module_param(aic79xx, charp, 0444);
323MODULE_PARM_DESC(aic79xx, 323MODULE_PARM_DESC(aic79xx,
324"period delimited, options string.\n" 324"period-delimited options string:\n"
325" verbose Enable verbose/diagnostic logging\n" 325" verbose Enable verbose/diagnostic logging\n"
326" allow_memio Allow device registers to be memory mapped\n" 326" allow_memio Allow device registers to be memory mapped\n"
327" debug Bitmask of debug values to enable\n" 327" debug Bitmask of debug values to enable\n"
@@ -346,7 +346,7 @@ MODULE_PARM_DESC(aic79xx,
346" Shorten the selection timeout to 128ms\n" 346" Shorten the selection timeout to 128ms\n"
347"\n" 347"\n"
348" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" 348" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
349"\n"); 349);
350 350
351static void ahd_linux_handle_scsi_status(struct ahd_softc *, 351static void ahd_linux_handle_scsi_status(struct ahd_softc *,
352 struct scsi_device *, 352 struct scsi_device *,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index aa4be8a31415..64c8b88a429f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -341,7 +341,7 @@ MODULE_LICENSE("Dual BSD/GPL");
341MODULE_VERSION(AIC7XXX_DRIVER_VERSION); 341MODULE_VERSION(AIC7XXX_DRIVER_VERSION);
342module_param(aic7xxx, charp, 0444); 342module_param(aic7xxx, charp, 0444);
343MODULE_PARM_DESC(aic7xxx, 343MODULE_PARM_DESC(aic7xxx,
344"period delimited, options string.\n" 344"period-delimited options string:\n"
345" verbose Enable verbose/diagnostic logging\n" 345" verbose Enable verbose/diagnostic logging\n"
346" allow_memio Allow device registers to be memory mapped\n" 346" allow_memio Allow device registers to be memory mapped\n"
347" debug Bitmask of debug values to enable\n" 347" debug Bitmask of debug values to enable\n"
@@ -2539,15 +2539,28 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
2539static void ahc_linux_get_signalling(struct Scsi_Host *shost) 2539static void ahc_linux_get_signalling(struct Scsi_Host *shost)
2540{ 2540{
2541 struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; 2541 struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata;
2542 u8 mode = ahc_inb(ahc, SBLKCTL); 2542 unsigned long flags;
2543 u8 mode;
2543 2544
2544 if (mode & ENAB40) 2545 if (!(ahc->features & AHC_ULTRA2)) {
2545 spi_signalling(shost) = SPI_SIGNAL_LVD; 2546 /* non-LVD chipset, may not have SBLKCTL reg */
2546 else if (mode & ENAB20)
2547 spi_signalling(shost) = 2547 spi_signalling(shost) =
2548 ahc->features & AHC_HVD ? 2548 ahc->features & AHC_HVD ?
2549 SPI_SIGNAL_HVD : 2549 SPI_SIGNAL_HVD :
2550 SPI_SIGNAL_SE; 2550 SPI_SIGNAL_SE;
2551 return;
2552 }
2553
2554 ahc_lock(ahc, &flags);
2555 ahc_pause(ahc);
2556 mode = ahc_inb(ahc, SBLKCTL);
2557 ahc_unpause(ahc);
2558 ahc_unlock(ahc, &flags);
2559
2560 if (mode & ENAB40)
2561 spi_signalling(shost) = SPI_SIGNAL_LVD;
2562 else if (mode & ENAB20)
2563 spi_signalling(shost) = SPI_SIGNAL_SE;
2551 else 2564 else
2552 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; 2565 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;
2553} 2566}
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 3f85b5e978f1..5dcef48d414f 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -249,8 +249,6 @@
249#include <linux/stat.h> 249#include <linux/stat.h>
250#include <linux/slab.h> /* for kmalloc() */ 250#include <linux/slab.h> /* for kmalloc() */
251 251
252#include <linux/config.h> /* for CONFIG_PCI */
253
254#define AIC7XXX_C_VERSION "5.2.6" 252#define AIC7XXX_C_VERSION "5.2.6"
255 253
256#define ALL_TARGETS -1 254#define ALL_TARGETS -1
@@ -9196,7 +9194,7 @@ aic7xxx_detect(struct scsi_host_template *template)
9196 for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++) 9194 for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++)
9197 { 9195 {
9198 pdev = NULL; 9196 pdev = NULL;
9199 while ((pdev = pci_find_device(aic_pdevs[i].vendor_id, 9197 while ((pdev = pci_get_device(aic_pdevs[i].vendor_id,
9200 aic_pdevs[i].device_id, 9198 aic_pdevs[i].device_id,
9201 pdev))) { 9199 pdev))) {
9202 if (pci_enable_device(pdev)) 9200 if (pci_enable_device(pdev))
@@ -9653,6 +9651,9 @@ aic7xxx_detect(struct scsi_host_template *template)
9653 */ 9651 */
9654 aic7xxx_configure_bugs(temp_p); 9652 aic7xxx_configure_bugs(temp_p);
9655 9653
9654 /* Hold a pci device reference */
9655 pci_dev_get(temp_p->pdev);
9656
9656 if ( list_p == NULL ) 9657 if ( list_p == NULL )
9657 { 9658 {
9658 list_p = current_p = temp_p; 9659 list_p = current_p = temp_p;
@@ -10989,8 +10990,10 @@ aic7xxx_release(struct Scsi_Host *host)
10989 if(!p->pdev) 10990 if(!p->pdev)
10990 release_region(p->base, MAXREG - MINREG); 10991 release_region(p->base, MAXREG - MINREG);
10991#ifdef CONFIG_PCI 10992#ifdef CONFIG_PCI
10992 else 10993 else {
10993 pci_release_regions(p->pdev); 10994 pci_release_regions(p->pdev);
10995 pci_dev_put(p->pdev);
10996 }
10994#endif 10997#endif
10995 prev = NULL; 10998 prev = NULL;
10996 next = first_aic7xxx; 10999 next = first_aic7xxx;
diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig
new file mode 100644
index 000000000000..0ed391d8ee84
--- /dev/null
+++ b/drivers/scsi/aic94xx/Kconfig
@@ -0,0 +1,41 @@
1#
2# Kernel configuration file for aic94xx SAS/SATA driver.
3#
4# Copyright (c) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (c) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This file is part of the aic94xx driver.
10#
11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as
13# published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The aic94xx driver is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with Aic94xx Driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24#
25#
26
27config SCSI_AIC94XX
28 tristate "Adaptec AIC94xx SAS/SATA support"
29 depends on PCI
30 select SCSI_SAS_LIBSAS
31 help
32 This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X
33 AIC94xx chip based host adapters.
34
35config AIC94XX_DEBUG
36 bool "Compile in debug mode"
37 default y
38 depends on SCSI_AIC94XX
39 help
40 Compiles the aic94xx driver in debug mode. In debug mode,
41 the driver prints some messages to the console.
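(For a modular build with debugging, the resulting kernel configuration would read CONFIG_SCSI_AIC94XX=m and CONFIG_AIC94XX_DEBUG=y; as the Makefile and aic94xx.h added below show, the debug option injects -DASD_DEBUG -DASD_ENTER_EXIT into EXTRA_CFLAGS, which in turn enables the ASD_DPRINTK and ENTER/EXIT macros.)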
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
new file mode 100644
index 000000000000..e6b70123940c
--- /dev/null
+++ b/drivers/scsi/aic94xx/Makefile
@@ -0,0 +1,39 @@
1#
2# Makefile for Adaptec aic94xx SAS/SATA driver.
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
 9# This file is part of the aic94xx driver.
10#
11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as
13# published by the Free Software Foundation; version 2 of the
14# License.
15#
16# The aic94xx driver is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19# General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License
22# along with the aic94xx driver; if not, write to the Free Software
23# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24
25ifeq ($(CONFIG_AIC94XX_DEBUG),y)
26 EXTRA_CFLAGS += -DASD_DEBUG -DASD_ENTER_EXIT
27endif
28
29obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o
30aic94xx-y += aic94xx_init.o \
31 aic94xx_hwi.o \
32 aic94xx_reg.o \
33 aic94xx_sds.o \
34 aic94xx_seq.o \
35 aic94xx_dump.o \
36 aic94xx_scb.o \
37 aic94xx_dev.o \
38 aic94xx_tmf.o \
39 aic94xx_task.o
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
new file mode 100644
index 000000000000..1bd5b4ecf3d5
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -0,0 +1,114 @@
1/*
2 * Aic94xx SAS/SATA driver header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * $Id: //depot/aic94xx/aic94xx.h#31 $
26 */
27
28#ifndef _AIC94XX_H_
29#define _AIC94XX_H_
30
31#include <linux/slab.h>
32#include <linux/ctype.h>
33#include <scsi/libsas.h>
34
35#define ASD_DRIVER_NAME "aic94xx"
36#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver"
37
38#define asd_printk(fmt, ...) printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__)
39
40#ifdef ASD_ENTER_EXIT
41#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
42 __FUNCTION__)
43#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
44 __FUNCTION__)
45#else
46#define ENTER
47#define EXIT
48#endif
49
50#ifdef ASD_DEBUG
51#define ASD_DPRINTK asd_printk
52#else
53#define ASD_DPRINTK(fmt, ...)
54#endif
55
56/* 2*ITNL timeout + 1 second */
57#define AIC94XX_SCB_TIMEOUT (5*HZ)
58
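(Reading of the comment above: assuming the ITNL, i.e. I_T nexus loss, timeout referenced elsewhere in the driver is 2 seconds, a value not defined in this header, the arithmetic is 2 * 2 s + 1 s = 5 s, hence the 5*HZ value.)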
59extern kmem_cache_t *asd_dma_token_cache;
60extern kmem_cache_t *asd_ascb_cache;
61extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
62
63static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
64{
65 int i;
66 for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
67 snprintf(p, 3, "%02X", sas_addr[i]);
68 *p = '\0';
69}
70
71static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p)
72{
73 int i;
74 for (i = 0; i < SAS_ADDR_SIZE; i++) {
75 u8 h, l;
76 if (!*p)
77 break;
78 h = isdigit(*p) ? *p-'0' : *p-'A'+10;
79 p++;
80 l = isdigit(*p) ? *p-'0' : *p-'A'+10;
81 p++;
82 sas_addr[i] = (h<<4) | l;
83 }
84}
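(The two inline helpers above are mechanical inverses: one renders a SAS address as upper-case hex digits, the other parses such a string back into bytes. A minimal usage sketch, using only what this header declares and a made-up address:

    u8 addr[SAS_ADDR_SIZE] = { 0x50, 0x00, 0x0D, 0x1E, 0x00, 0x00, 0x00, 0x01 };
    char buf[2 * SAS_ADDR_SIZE + 1];
    u8 back[SAS_ADDR_SIZE] = { 0 };

    asd_stringify_sas_addr(buf, addr);   /* buf is now "50000D1E00000001" */
    asd_destringify_sas_addr(back, buf); /* back holds the original bytes */
)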
85
86struct asd_ha_struct;
87struct asd_ascb;
88
89int asd_read_ocm(struct asd_ha_struct *asd_ha);
90int asd_read_flash(struct asd_ha_struct *asd_ha);
91
92int asd_dev_found(struct domain_device *dev);
93void asd_dev_gone(struct domain_device *dev);
94
95void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
96
97int asd_execute_task(struct sas_task *, int num, unsigned long gfp_flags);
98
99/* ---------- TMFs ---------- */
100int asd_abort_task(struct sas_task *);
101int asd_abort_task_set(struct domain_device *, u8 *lun);
102int asd_clear_aca(struct domain_device *, u8 *lun);
103int asd_clear_task_set(struct domain_device *, u8 *lun);
104int asd_lu_reset(struct domain_device *, u8 *lun);
105int asd_query_task(struct sas_task *);
106
107/* ---------- Adapter and Port management ---------- */
108int asd_clear_nexus_port(struct asd_sas_port *port);
109int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha);
110
111/* ---------- Phy Management ---------- */
112int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg);
113
114#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
new file mode 100644
index 000000000000..6f8901b748f7
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -0,0 +1,353 @@
1/*
2 * Aic94xx SAS/SATA DDB management
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * $Id: //depot/aic94xx/aic94xx_dev.c#21 $
26 */
27
28#include "aic94xx.h"
29#include "aic94xx_hwi.h"
30#include "aic94xx_reg.h"
31#include "aic94xx_sas.h"
32
33#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \
34 (_ha)->hw_prof.max_ddbs)
35#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
36#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap)
37
38static inline int asd_get_ddb(struct asd_ha_struct *asd_ha)
39{
40 unsigned long flags;
41 int ddb, i;
42
43 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
44 ddb = FIND_FREE_DDB(asd_ha);
45 if (ddb >= asd_ha->hw_prof.max_ddbs) {
46 ddb = -ENOMEM;
47 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
48 goto out;
49 }
50 SET_DDB(ddb, asd_ha);
51 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
52
53 for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4)
54 asd_ddbsite_write_dword(asd_ha, ddb, i, 0);
55out:
56 return ddb;
57}
58
59#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag)
60#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr)
61#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head)
62#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type)
63#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask)
64#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags)
65#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2)
66#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail)
67#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail)
68#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb)
69#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn)
70#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts)
71#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr)
72#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask)
73#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags)
74#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status)
75#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr)
76#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout)
77
78static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb)
79{
80 unsigned long flags;
81
82 if (!ddb || ddb >= 0xFFFF)
83 return;
84 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED);
85 spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
86 CLEAR_DDB(ddb, asd_ha);
87 spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);
88}
89
90static inline void asd_set_ddb_type(struct domain_device *dev)
91{
92 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
93 int ddb = (int) (unsigned long) dev->lldd_dev;
94
95 if (dev->dev_type == SATA_PM_PORT)
96 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
97 else if (dev->tproto)
98 asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
99 else
100 asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR);
101}
102
103static int asd_init_sata_tag_ddb(struct domain_device *dev)
104{
105 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
106 int ddb, i;
107
108 ddb = asd_get_ddb(asd_ha);
109 if (ddb < 0)
110 return ddb;
111
112 for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2)
113 asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
114
115 asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
116 SISTER_DDB, ddb);
117 return 0;
118}
119
120static inline int asd_init_sata(struct domain_device *dev)
121{
122 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
123 int ddb = (int) (unsigned long) dev->lldd_dev;
124 u32 qdepth = 0;
125 int res = 0;
126
127 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
128 if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
129 dev->sata_dev.identify_device &&
130 dev->sata_dev.identify_device[10] != 0) {
131 u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
132 u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
133
134 if (w76 & 0x100) /* NCQ? */
135 qdepth = (w75 & 0x1F) + 1;
136 asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
137 (1<<qdepth)-1);
138 asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
139 }
140 if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
141 dev->dev_type == SATA_PM_PORT) {
142 struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
143 dev->frame_rcvd;
144 asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
145 }
146 asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
147 if (qdepth > 0)
148 res = asd_init_sata_tag_ddb(dev);
149 return res;
150}
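(A worked instance of the NCQ branch above, not an additional code path: for a drive whose IDENTIFY DEVICE word 76 has bit 8 set and whose word 75 reports 15 in its low five bits, qdepth becomes 16, SATA_TAG_ALLOC_MASK is written as (1 << 16) - 1 = 0xFFFF, and NUM_SATA_TAGS is set to 16; asd_init_sata_tag_ddb() is then called because qdepth > 0.)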
151
152static int asd_init_target_ddb(struct domain_device *dev)
153{
154 int ddb, i;
155 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
156 u8 flags = 0;
157
158 ddb = asd_get_ddb(asd_ha);
159 if (ddb < 0)
160 return ddb;
161
162 dev->lldd_dev = (void *) (unsigned long) ddb;
163
164 asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE);
165 asd_ddbsite_write_byte(asd_ha, ddb, 1, 0);
166 asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF);
167 for (i = 0; i < SAS_ADDR_SIZE; i++)
168 asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i,
169 dev->sas_addr[i]);
170 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF);
171 asd_set_ddb_type(dev);
172 asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
173 if (dev->port->oob_mode != SATA_OOB_MODE) {
174 flags |= OPEN_REQUIRED;
175 if ((dev->dev_type == SATA_DEV) ||
176 (dev->tproto & SAS_PROTO_STP)) {
177 struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
178 if (rps_resp->frame_type == SMP_RESPONSE &&
179 rps_resp->function == SMP_REPORT_PHY_SATA &&
180 rps_resp->result == SMP_RESP_FUNC_ACC) {
181 if (rps_resp->rps.affil_valid)
182 flags |= STP_AFFIL_POL;
183 if (rps_resp->rps.affil_supp)
184 flags |= SUPPORTS_AFFIL;
185 }
186 } else {
187 flags |= CONCURRENT_CONN_SUPP;
188 if (!dev->parent &&
189 (dev->dev_type == EDGE_DEV ||
190 dev->dev_type == FANOUT_DEV))
191 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
192 4);
193 else
194 asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
195 dev->pathways);
196 asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
197 }
198 }
199 if (dev->dev_type == SATA_PM)
200 flags |= SATA_MULTIPORT;
201 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
202
203 flags = 0;
204 if (dev->tproto & SAS_PROTO_STP)
205 flags |= STP_CL_POL_NO_TX;
206 asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags);
207
208 asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF);
209 asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
210 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
211
212 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) {
213 i = asd_init_sata(dev);
214 if (i < 0) {
215 asd_free_ddb(asd_ha, ddb);
216 return i;
217 }
218 }
219
220 if (dev->dev_type == SAS_END_DEV) {
221 struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
222 if (rdev->I_T_nexus_loss_timeout > 0)
223 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
224 min(rdev->I_T_nexus_loss_timeout,
225 (u16)ITNL_TIMEOUT_CONST));
226 else
227 asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
228 (u16)ITNL_TIMEOUT_CONST);
229 }
230 return 0;
231}
232
233static int asd_init_sata_pm_table_ddb(struct domain_device *dev)
234{
235 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
236 int ddb, i;
237
238 ddb = asd_get_ddb(asd_ha);
239 if (ddb < 0)
240 return ddb;
241
242 for (i = 0; i < 32; i += 2)
243 asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF);
244
245 asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev,
246 SISTER_DDB, ddb);
247
248 return 0;
249}
250
251#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags)
252#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb)
253
254/**
255 * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port
256 * dev: pointer to domain device
257 *
258 * For SATA Port Multiplier Ports we need to allocate one SATA Port
259 * Multiplier Port DDB and depending on whether the target on it
260 * supports SATA II NCQ, one SATA Tag DDB.
261 */
262static int asd_init_sata_pm_port_ddb(struct domain_device *dev)
263{
264 int ddb, i, parent_ddb, pmtable_ddb;
265 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
266 u8 flags;
267
268 ddb = asd_get_ddb(asd_ha);
269 if (ddb < 0)
270 return ddb;
271
272 asd_set_ddb_type(dev);
273 flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET;
274 asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags);
275 asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
276 asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
277 asd_init_sata(dev);
278
279 parent_ddb = (int) (unsigned long) dev->parent->lldd_dev;
280 asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb);
281 pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB);
282 asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb);
283
284 if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) {
285 i = asd_init_sata_tag_ddb(dev);
286 if (i < 0) {
287 asd_free_ddb(asd_ha, ddb);
288 return i;
289 }
290 }
291 return 0;
292}
293
294static int asd_init_initiator_ddb(struct domain_device *dev)
295{
296 return -ENODEV;
297}
298
299/**
300 * asd_init_sata_pm_ddb -- SATA Port Multiplier
301 * dev: pointer to domain device
302 *
303 * For STP and direct-attached SATA Port Multipliers we need
304 * one target port DDB entry and one SATA PM table DDB entry.
305 */
306static int asd_init_sata_pm_ddb(struct domain_device *dev)
307{
308 int res = 0;
309
310 res = asd_init_target_ddb(dev);
311 if (res)
312 goto out;
313 res = asd_init_sata_pm_table_ddb(dev);
314 if (res)
315 asd_free_ddb(dev->port->ha->lldd_ha,
316 (int) (unsigned long) dev->lldd_dev);
317out:
318 return res;
319}
320
321int asd_dev_found(struct domain_device *dev)
322{
323 int res = 0;
324
325 switch (dev->dev_type) {
326 case SATA_PM:
327 res = asd_init_sata_pm_ddb(dev);
328 break;
329 case SATA_PM_PORT:
330 res = asd_init_sata_pm_port_ddb(dev);
331 break;
332 default:
333 if (dev->tproto)
334 res = asd_init_target_ddb(dev);
335 else
336 res = asd_init_initiator_ddb(dev);
337 }
338 return res;
339}
340
341void asd_dev_gone(struct domain_device *dev)
342{
343 int ddb, sister_ddb;
344 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
345
346 ddb = (int) (unsigned long) dev->lldd_dev;
347 sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB);
348
349 if (sister_ddb != 0xFFFF)
350 asd_free_ddb(asd_ha, sister_ddb);
351 asd_free_ddb(asd_ha, ddb);
352 dev->lldd_dev = NULL;
353}
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c
new file mode 100644
index 000000000000..e6ade5996d95
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.c
@@ -0,0 +1,959 @@
1/*
2 * Aic94xx SAS/SATA driver dump interface.
3 *
4 * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This file is part of the aic94xx driver.
11 *
12 * The aic94xx driver is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; version 2 of the
15 * License.
16 *
17 * The aic94xx driver is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with the aic94xx driver; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 *
26 * 2005/07/14/LT Complete overhaul of this file. Update pages, register
27 * locations, names, etc. Make use of macros. Print more information.
28 * Print all cseq and lseq mip and mdp.
29 *
30 */
31
32#include "linux/pci.h"
33#include "aic94xx.h"
34#include "aic94xx_reg.h"
35#include "aic94xx_reg_def.h"
36#include "aic94xx_sas.h"
37
38#include "aic94xx_dump.h"
39
40#ifdef ASD_DEBUG
41
42#define MD(x) (1 << (x))
43#define MODE_COMMON (1 << 31)
44#define MODE_0_7 (0xFF)
45
46static const struct lseq_cio_regs {
47 char *name;
48 u32 offs;
49 u8 width;
50 u32 mode;
51} LSEQmCIOREGS[] = {
52 {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
53 {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) },
54 {"LmREQMBX", 0x30, 32, MODE_COMMON },
55 {"LmRSPMBX", 0x34, 32, MODE_COMMON },
56 {"LmMnINT", 0x38, 32, MODE_0_7 },
57 {"LmMnINTEN", 0x3C, 32, MODE_0_7 },
58 {"LmXMTPRIMD", 0x40, 32, MODE_COMMON },
59 {"LmXMTPRIMCS", 0x44, 8, MODE_COMMON },
60 {"LmCONSTAT", 0x45, 8, MODE_COMMON },
61 {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) },
62 {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) },
63 {"LmMnEXPHDRP", 0x48, 8, MD(0) },
64 {"LmMnSASAALIGN", 0x48, 8, MD(1) },
65 {"LmMnMSKHDRP", 0x49, 8, MD(0) },
66 {"LmMnSTPALIGN", 0x49, 8, MD(1) },
67 {"LmMnRCVHDRP", 0x4A, 8, MD(0) },
68 {"LmMnXMTHDRP", 0x4A, 8, MD(1) },
69 {"LmALIGNMODE", 0x4B, 8, MD(1) },
70 {"LmMnEXPRCVCNT", 0x4C, 32, MD(0) },
71 {"LmMnXMTCNT", 0x4C, 32, MD(1) },
72 {"LmMnCURRTAG", 0x54, 16, MD(0) },
73 {"LmMnPREVTAG", 0x56, 16, MD(0) },
74 {"LmMnACKOFS", 0x58, 8, MD(1) },
75 {"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) },
76 {"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) },
77 {"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) },
78 {"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) },
79 {"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) },
80 {"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) },
81 {"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) },
82 {"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) },
83 {"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) },
84 {"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) },
85 {"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) },
86 {"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) },
87 {"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) },
88 {"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) },
89 {"LmMnXMTCRC", 0x74, 32, MD(1) },
90 {"LmCURRTAG", 0x74, 16, MD(0) },
91 {"LmPREVTAG", 0x76, 16, MD(0) },
92 {"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) },
93 {"LmDPTHSTAT", 0x7C, 8, MODE_COMMON },
94 {"LmMnHOLDLVL", 0x7D, 8, MD(0) },
95 {"LmMnSATAFS", 0x7E, 8, MD(1) },
96 {"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) },
97 {"LmPRMSTAT0", 0x80, 32, MODE_COMMON },
98 {"LmPRMSTAT1", 0x84, 32, MODE_COMMON },
99 {"LmGPRMINT", 0x88, 8, MODE_COMMON },
100 {"LmMnCURRSCB", 0x8A, 16, MD(0) },
101 {"LmPRMICODE", 0x8C, 32, MODE_COMMON },
102 {"LmMnRCVCNT", 0x90, 16, MD(0) },
103 {"LmMnBUFSTAT", 0x92, 16, MD(0) },
104 {"LmMnXMTHDRSIZE",0x92, 8, MD(1) },
105 {"LmMnXMTSIZE", 0x93, 8, MD(1) },
106 {"LmMnTGTXFRCNT", 0x94, 32, MD(0) },
107 {"LmMnEXPROFS", 0x98, 32, MD(0) },
108 {"LmMnXMTROFS", 0x98, 32, MD(1) },
109 {"LmMnRCVROFS", 0x9C, 32, MD(0) },
110 {"LmCONCTL", 0xA0, 16, MODE_COMMON },
111 {"LmBITLTIMER", 0xA2, 16, MODE_COMMON },
112 {"LmWWNLOW", 0xA8, 32, MODE_COMMON },
113 {"LmWWNHIGH", 0xAC, 32, MODE_COMMON },
114 {"LmMnFRMERR", 0xB0, 32, MD(0) },
115 {"LmMnFRMERREN", 0xB4, 32, MD(0) },
116 {"LmAWTIMER", 0xB8, 16, MODE_COMMON },
117 {"LmAWTCTL", 0xBA, 8, MODE_COMMON },
118 {"LmMnHDRCMPS", 0xC0, 32, MD(0) },
119 {"LmMnXMTSTAT", 0xC4, 8, MD(1) },
120 {"LmHWTSTATEN", 0xC5, 8, MODE_COMMON },
121 {"LmMnRRDYRC", 0xC6, 8, MD(0) },
122 {"LmMnRRDYTC", 0xC6, 8, MD(1) },
123 {"LmHWTSTAT", 0xC7, 8, MODE_COMMON },
124 {"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) },
125 {"LmDWSSTATUS", 0xCB, 8, MODE_COMMON },
126 {"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) },
127 {"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) },
128 {"LmXXXPRIM", 0xD4, 32, MODE_COMMON },
129 {"LmRCVASTAT", 0xD9, 8, MODE_COMMON },
130 {"LmINTDIS1", 0xDA, 8, MODE_COMMON },
131 {"LmPSTORESEL", 0xDB, 8, MODE_COMMON },
132 {"LmPSTORE", 0xDC, 32, MODE_COMMON },
133 {"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON },
134 {"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON },
135 {"LmDONETCTL", 0xF2, 16, MODE_COMMON },
136 {NULL, 0, 0, 0 }
137};
138/*
139static struct lseq_cio_regs LSEQmOOBREGS[] = {
140 {"OOB_BFLTR" ,0x100, 8, MD(5)},
141 {"OOB_INIT_MIN" ,0x102,16, MD(5)},
142 {"OOB_INIT_MAX" ,0x104,16, MD(5)},
143 {"OOB_INIT_NEG" ,0x106,16, MD(5)},
144 {"OOB_SAS_MIN" ,0x108,16, MD(5)},
145 {"OOB_SAS_MAX" ,0x10A,16, MD(5)},
146 {"OOB_SAS_NEG" ,0x10C,16, MD(5)},
147 {"OOB_WAKE_MIN" ,0x10E,16, MD(5)},
148 {"OOB_WAKE_MAX" ,0x110,16, MD(5)},
149 {"OOB_WAKE_NEG" ,0x112,16, MD(5)},
150 {"OOB_IDLE_MAX" ,0x114,16, MD(5)},
151 {"OOB_BURST_MAX" ,0x116,16, MD(5)},
152 {"OOB_XMIT_BURST" ,0x118, 8, MD(5)},
153 {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)},
154 {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)},
155 {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)},
156 {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)},
157 {"OOB_SAS_NEGO" ,0x120, 8, MD(5)},
158 {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)},
159 {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)},
160 {"OOB_DATA_KBITS" ,0x126, 8, MD(5)},
161 {"OOB_BURST_DATA" ,0x128,32, MD(5)},
162 {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)},
163 {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)},
164 {"OOB_SYNC_DATA" ,0x134,32, MD(5)},
165 {"OOB_D10_2_DATA" ,0x138,32, MD(5)},
166 {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)},
167 {"OOB_SIG_GEN" ,0x140, 8, MD(5)},
168 {"OOB_XMIT" ,0x141, 8, MD(5)},
169 {"FUNCTION_MAKS" ,0x142, 8, MD(5)},
170 {"OOB_MODE" ,0x143, 8, MD(5)},
171 {"CURRENT_STATUS" ,0x144, 8, MD(5)},
172 {"SPEED_MASK" ,0x145, 8, MD(5)},
173 {"PRIM_COUNT" ,0x146, 8, MD(5)},
174 {"OOB_SIGNALS" ,0x148, 8, MD(5)},
175 {"OOB_DATA_DET" ,0x149, 8, MD(5)},
176 {"OOB_TIME_OUT" ,0x14C, 8, MD(5)},
177 {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)},
178 {"OOB_STATUS" ,0x14E, 8, MD(5)},
179 {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)},
180 {"RCD_DELAY" ,0x151, 8, MD(5)},
181 {"COMSAS_TIMER" ,0x152, 8, MD(5)},
182 {"SNTT_DELAY" ,0x153, 8, MD(5)},
183 {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)},
184 {"SNLT_DELAY" ,0x155, 8, MD(5)},
185 {"SNWT_DELAY" ,0x156, 8, MD(5)},
186 {"ALIGN_DELAY" ,0x157, 8, MD(5)},
187 {"INT_ENABLE_0" ,0x158, 8, MD(5)},
188 {"INT_ENABLE_1" ,0x159, 8, MD(5)},
189 {"INT_ENABLE_2" ,0x15A, 8, MD(5)},
190 {"INT_ENABLE_3" ,0x15B, 8, MD(5)},
191 {"OOB_TEST_REG" ,0x15C, 8, MD(5)},
192 {"PHY_CONTROL_0" ,0x160, 8, MD(5)},
193 {"PHY_CONTROL_1" ,0x161, 8, MD(5)},
194 {"PHY_CONTROL_2" ,0x162, 8, MD(5)},
195 {"PHY_CONTROL_3" ,0x163, 8, MD(5)},
196 {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)},
197 {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)},
198 {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)},
199 {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)},
200 {"PHY_CONTROL_4" ,0x168, 8, MD(5)},
201 {"PHY_TEST" ,0x169, 8, MD(5)},
202 {"PHY_PWR_CTL" ,0x16A, 8, MD(5)},
203 {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)},
204 {"OOB_SM_CON" ,0x16C, 8, MD(5)},
205 {"ADDR_TRAP_1" ,0x16D, 8, MD(5)},
206 {"ADDR_NEXT_1" ,0x16E, 8, MD(5)},
207 {"NEXT_ST_1" ,0x16F, 8, MD(5)},
208 {"OOB_SM_STATE" ,0x170, 8, MD(5)},
209 {"ADDR_TRAP_2" ,0x171, 8, MD(5)},
210 {"ADDR_NEXT_2" ,0x172, 8, MD(5)},
211 {"NEXT_ST_2" ,0x173, 8, MD(5)},
212 {NULL, 0, 0, 0 }
213};
214*/
215#define STR_8BIT " %30s[0x%04x]:0x%02x\n"
216#define STR_16BIT " %30s[0x%04x]:0x%04x\n"
217#define STR_32BIT " %30s[0x%04x]:0x%08x\n"
218#define STR_64BIT " %30s[0x%04x]:0x%llx\n"
219
220#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \
221 asd_read_reg_byte(_ha, _r))
222#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \
223 asd_read_reg_word(_ha, _r))
224#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \
225 asd_read_reg_dword(_ha, _r))
226
227#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \
228 asd_read_reg_byte(_ha, C##_n))
229#define PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \
230 asd_read_reg_word(_ha, C##_n))
231#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \
232 asd_read_reg_dword(_ha, C##_n))
233
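(These printers rely on token pasting: the register name is stringified into the output and, for the CREG variants, glued onto a leading C to form the register constant. For example, given the definitions above,

    PRINT_CREG_8bit(asd_ha, MODEPTR);

expands to

    asd_printk(STR_8BIT, "MODEPTR", MODEPTR,
               asd_read_reg_byte(asd_ha, CMODEPTR));

exactly as used in asd_dump_cseq_state() below.)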
234#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n"
235#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n"
236#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n"
237
238#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \
239 asd_read_reg_byte(_ha, _r))
240#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \
241 asd_read_reg_word(_ha, _r))
242#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \
243 asd_read_reg_dword(_ha, _r))
244
245/* can also be used for MD when the register is mode aware already */
246#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\
247 asd_read_reg_byte(_ha, CSEQ_##_n))
248#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
249 asd_read_reg_word(_ha, CSEQ_##_n))
250#define PRINT_MIS_dword(_ha, _n) \
251 asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\
252 asd_read_reg_dword(_ha, CSEQ_##_n))
253#define PRINT_MIS_qword(_ha, _n) \
254 asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR, \
255 (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \
256 | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32)))
257
258#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n)
259#define PRINT_CMDP_word(_ha, _n) \
260asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
261 #_n, \
262 asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \
263 asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \
264 asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \
265 asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \
266 asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \
267 asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \
268 asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \
269 asd_read_reg_word(_ha, CMDP_REG(_n, 7)))
270
271#define PRINT_CMDP_byte(_ha, _n) \
272asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \
273 #_n, \
274 asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \
275 asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \
276 asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \
277 asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \
278 asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \
279 asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \
280 asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \
281 asd_read_reg_byte(_ha, CMDP_REG(_n, 7)))
282
283static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha)
284{
285 int mode;
286
287 asd_printk("CSEQ STATE\n");
288
289 asd_printk("ARP2 REGISTERS\n");
290
291 PRINT_CREG_32bit(asd_ha, ARP2CTL);
292 PRINT_CREG_32bit(asd_ha, ARP2INT);
293 PRINT_CREG_32bit(asd_ha, ARP2INTEN);
294 PRINT_CREG_8bit(asd_ha, MODEPTR);
295 PRINT_CREG_8bit(asd_ha, ALTMODE);
296 PRINT_CREG_8bit(asd_ha, FLAG);
297 PRINT_CREG_8bit(asd_ha, ARP2INTCTL);
298 PRINT_CREG_16bit(asd_ha, STACK);
299 PRINT_CREG_16bit(asd_ha, PRGMCNT);
300 PRINT_CREG_16bit(asd_ha, ACCUM);
301 PRINT_CREG_16bit(asd_ha, SINDEX);
302 PRINT_CREG_16bit(asd_ha, DINDEX);
303 PRINT_CREG_8bit(asd_ha, SINDIR);
304 PRINT_CREG_8bit(asd_ha, DINDIR);
305 PRINT_CREG_8bit(asd_ha, JUMLDIR);
306 PRINT_CREG_8bit(asd_ha, ARP2HALTCODE);
307 PRINT_CREG_16bit(asd_ha, CURRADDR);
308 PRINT_CREG_16bit(asd_ha, LASTADDR);
309 PRINT_CREG_16bit(asd_ha, NXTLADDR);
310
311 asd_printk("IOP REGISTERS\n");
312
313 PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL);
314 PRINT_CREG_32bit(asd_ha, MAPPEDSCR);
315
316 asd_printk("CIO REGISTERS\n");
317
318 for (mode = 0; mode < 9; mode++)
319 PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode));
320 PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15));
321
322 for (mode = 0; mode < 9; mode++)
323 PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode));
324 PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15));
325
326 for (mode = 0; mode < 8; mode++)
327 PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode));
328 for (mode = 0; mode < 8; mode++)
329 PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode));
330 for (mode = 0; mode < 8; mode++)
331 PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode));
332 for (mode = 0; mode < 8; mode++)
333 PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode));
334
335 PRINT_CREG_8bit(asd_ha, SCRATCHPAGE);
336 for (mode = 0; mode < 8; mode++)
337 PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE,
338 CMnSCRATCHPAGE(mode));
339
340 PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON);
341 PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK);
342 PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST);
343 PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE);
344 PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC);
345 PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS);
346
347 asd_printk("SCRATCH MEMORY\n");
348
349 asd_printk("MIP 4 >>>>>\n");
350 PRINT_MIS_word(asd_ha, Q_EXE_HEAD);
351 PRINT_MIS_word(asd_ha, Q_EXE_TAIL);
352 PRINT_MIS_word(asd_ha, Q_DONE_HEAD);
353 PRINT_MIS_word(asd_ha, Q_DONE_TAIL);
354 PRINT_MIS_word(asd_ha, Q_SEND_HEAD);
355 PRINT_MIS_word(asd_ha, Q_SEND_TAIL);
356 PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD);
357 PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL);
358 PRINT_MIS_word(asd_ha, Q_COPY_HEAD);
359 PRINT_MIS_word(asd_ha, Q_COPY_TAIL);
360 PRINT_MIS_word(asd_ha, REG0);
361 PRINT_MIS_word(asd_ha, REG1);
362 PRINT_MIS_dword(asd_ha, REG2);
363 PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP);
364 PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE);
365 PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT);
366
367 asd_printk("MIP 5 >>>>\n");
368 PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE);
369 PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT);
370 PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD);
371 PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL);
372 PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB);
373 PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD);
374 PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL);
375 PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET);
376
377 asd_printk("MIP 6 >>>>\n");
378 PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0);
379 PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1);
380 PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR);
381 PRINT_MIS_byte(asd_ha, INT_ROUT_MODE);
382 PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS);
383 PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX);
384 PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX);
385 PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD);
386 PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL);
387 PRINT_MIS_byte(asd_ha, FREE_SCB_MASK);
388 PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD);
389 PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL);
390 PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD);
391 PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL);
392
393 asd_printk("MIP 7 >>>>\n");
394 PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE);
395 PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT);
396 PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD);
397 PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL);
398 PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB);
399 PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD);
400 PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL);
401 PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET);
402 PRINT_MIS_word(asd_ha, PRIMITIVE_DATA);
403 PRINT_MIS_dword(asd_ha, TIMEOUT_CONST);
404
405 asd_printk("MDP 0 >>>>\n");
406 asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n",
407 "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7");
408 PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX);
409 PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR);
410 PRINT_CMDP_word(asd_ha, Q_LINK_HEAD);
411 PRINT_CMDP_word(asd_ha, Q_LINK_TAIL);
412 PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE);
413
414 asd_printk("MDP 0 Mode 8 >>>>\n");
415 PRINT_MIS_word(asd_ha, RET_ADDR);
416 PRINT_MIS_word(asd_ha, RET_SCBPTR);
417 PRINT_MIS_word(asd_ha, SAVE_SCBPTR);
418 PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX);
419 PRINT_MIS_word(asd_ha, RESP_LEN);
420 PRINT_MIS_word(asd_ha, TMF_SCBPTR);
421 PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB);
422 PRINT_MIS_word(asd_ha, GLOBAL_HEAD);
423 PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD);
424 PRINT_MIS_byte(asd_ha, TMF_OPCODE);
425 PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS);
426 PRINT_MIS_word(asd_ha, HSB_SITE);
427 PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE);
428 PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE);
429
430 asd_printk("MDP 1 Mode 8 >>>>\n");
431 PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR);
432 PRINT_MIS_qword(asd_ha, LUN_TO_CHECK);
433
434 asd_printk("MDP 2 Mode 8 >>>>\n");
435 PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER);
436 PRINT_MIS_qword(asd_ha, HQ_DONE_BASE);
437 PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER);
438 PRINT_MIS_byte(asd_ha, HQ_DONE_PASS);
439}
440
441#define PRINT_LREG_8bit(_h, _lseq, _n) \
442 asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq)))
443#define PRINT_LREG_16bit(_h, _lseq, _n) \
444 asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq)))
445#define PRINT_LREG_32bit(_h, _lseq, _n) \
446 asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq)))
447
448#define PRINT_LMIP_byte(_h, _lseq, _n) \
449 asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
450 asd_read_reg_byte(_h, LmSEQ_##_n(_lseq)))
451#define PRINT_LMIP_word(_h, _lseq, _n) \
452 asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
453 asd_read_reg_word(_h, LmSEQ_##_n(_lseq)))
454#define PRINT_LMIP_dword(_h, _lseq, _n) \
455 asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
456 asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)))
457#define PRINT_LMIP_qword(_h, _lseq, _n) \
458 asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \
459 (unsigned long long)(((unsigned long long) \
460 asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \
461 | (((unsigned long long) \
462 asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32)))
463
464static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha,
465 u32 lseq_cio_addr, int i)
466{
467 switch (LSEQmCIOREGS[i].width) {
468 case 8:
469 asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name,
470 LSEQmCIOREGS[i].offs,
471 asd_read_reg_byte(asd_ha, lseq_cio_addr +
472 LSEQmCIOREGS[i].offs));
473
474 break;
475 case 16:
476 asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name,
477 LSEQmCIOREGS[i].offs,
478 asd_read_reg_word(asd_ha, lseq_cio_addr +
479 LSEQmCIOREGS[i].offs));
480
481 break;
482 case 32:
483 asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name,
484 LSEQmCIOREGS[i].offs,
485 asd_read_reg_dword(asd_ha, lseq_cio_addr +
486 LSEQmCIOREGS[i].offs));
487 break;
488 }
489}
490
491static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
492{
493 u32 moffs;
494 int mode;
495
496 asd_printk("LSEQ %d STATE\n", lseq);
497
498 asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq);
499 PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL);
500 PRINT_LREG_32bit(asd_ha, lseq, ARP2INT);
501 PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN);
502 PRINT_LREG_8bit(asd_ha, lseq, MODEPTR);
503 PRINT_LREG_8bit(asd_ha, lseq, ALTMODE);
504 PRINT_LREG_8bit(asd_ha, lseq, FLAG);
505 PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL);
506 PRINT_LREG_16bit(asd_ha, lseq, STACK);
507 PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT);
508 PRINT_LREG_16bit(asd_ha, lseq, ACCUM);
509 PRINT_LREG_16bit(asd_ha, lseq, SINDEX);
510 PRINT_LREG_16bit(asd_ha, lseq, DINDEX);
511 PRINT_LREG_8bit(asd_ha, lseq, SINDIR);
512 PRINT_LREG_8bit(asd_ha, lseq, DINDIR);
513 PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR);
514 PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE);
515 PRINT_LREG_16bit(asd_ha, lseq, CURRADDR);
516 PRINT_LREG_16bit(asd_ha, lseq, LASTADDR);
517 PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR);
518
519 asd_printk("LSEQ%d: IOP REGISTERS\n", lseq);
520
521 PRINT_LREG_32bit(asd_ha, lseq, MODECTL);
522 PRINT_LREG_32bit(asd_ha, lseq, DBGMODE);
523 PRINT_LREG_32bit(asd_ha, lseq, CONTROL);
524 PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq));
525 PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq));
526
527 asd_printk("LSEQ%d: CIO REGISTERS\n", lseq);
528 asd_printk("Mode common:\n");
529
530 for (mode = 0; mode < 8; mode++) {
531 u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
532 int i;
533
534 for (i = 0; LSEQmCIOREGS[i].name; i++)
535 if (LSEQmCIOREGS[i].mode == MODE_COMMON)
536 asd_print_lseq_cio_reg(asd_ha,lseq_cio_addr,i);
537 }
538
539 asd_printk("Mode unique:\n");
540 for (mode = 0; mode < 8; mode++) {
541 u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq);
542 int i;
543
544 asd_printk("Mode %d\n", mode);
545 for (i = 0; LSEQmCIOREGS[i].name; i++) {
546 if (!(LSEQmCIOREGS[i].mode & (1 << mode)))
547 continue;
548 asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i);
549 }
550 }
551
552 asd_printk("SCRATCH MEMORY\n");
553
554 asd_printk("LSEQ%d MIP 0 >>>>\n", lseq);
555 PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD);
556 PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL);
557 PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER);
558 PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS);
559 PRINT_LMIP_qword(asd_ha, lseq, CONNECTION_STATE);
560 PRINT_LMIP_word(asd_ha, lseq, CONCTL);
561 PRINT_LMIP_byte(asd_ha, lseq, CONSTAT);
562 PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES);
563 PRINT_LMIP_word(asd_ha, lseq, REG1_ISR);
564 PRINT_LMIP_word(asd_ha, lseq, REG2_ISR);
565 PRINT_LMIP_word(asd_ha, lseq, REG3_ISR);
566 PRINT_LMIP_qword(asd_ha, lseq,REG0_ISR);
567
568 asd_printk("LSEQ%d MIP 1 >>>>\n", lseq);
569 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0);
570 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1);
571 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2);
572 PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3);
573 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0);
574 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1);
575 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2);
576 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3);
577 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD);
578 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL);
579 PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL);
580 PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST);
581 PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX);
582 PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX);
583
584 asd_printk("LSEQ%d MIP 2 >>>>\n", lseq);
585 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0);
586 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1);
587 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2);
588 PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3);
589 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0);
590 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1);
591 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2);
592 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3);
593 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD);
594 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL);
595 PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL);
596
597 asd_printk("LSEQ%d MIP 3 >>>>\n", lseq);
598 PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST);
599 PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT);
600 PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT);
601 PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT);
602 PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT);
603 PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT);
604 PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT);
605
606 for (mode = 0; mode < 3; mode++) {
607 asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", lseq, mode);
608 moffs = mode * LSEQ_MODE_SCRATCH_SIZE;
609
610 asd_printk(STR_16BIT, "RET_ADDR", 0,
611 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)
612 + moffs));
613 asd_printk(STR_16BIT, "REG0_MODE", 2,
614 asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)
615 + moffs));
616 asd_printk(STR_16BIT, "MODE_FLAGS", 4,
617 asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)
618 + moffs));
619 asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
620 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)
621 + moffs));
622 asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
623 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)
624 + moffs));
625 asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
626 asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)
627 + moffs));
628 asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
629 asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)
630 + moffs));
631 }
632
633 asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq);
634 moffs = LSEQ_MODE5_PAGE0_OFFSET;
635 asd_printk(STR_16BIT, "RET_ADDR", 0,
636 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs));
637 asd_printk(STR_16BIT, "REG0_MODE", 2,
638 asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs));
639 asd_printk(STR_16BIT, "MODE_FLAGS", 4,
640 asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs));
641 asd_printk(STR_16BIT, "RET_ADDR2", 0x6,
642 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs));
643 asd_printk(STR_16BIT, "RET_ADDR1", 0x8,
644 asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs));
645 asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB,
646 asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs));
647 asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC,
648 asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs));
649
650 asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq);
651 PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE);
652 PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX);
653 PRINT_LMIP_word(asd_ha, lseq, RESP_LEN);
654 PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE);
655 PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE);
656 PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN);
657 PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL);
658 PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS);
659 PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE);
660 PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR);
661
662 asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq);
663 PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD);
664 PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX);
665 PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG);
666 PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS);
667 PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE);
668 PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS);
669 PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE);
670 PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR);
671
672 asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq);
673 PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER);
674 PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR);
675 PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE);
676 PRINT_LMIP_word(asd_ha, lseq, IP_BITL);
677 PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG);
678 PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH);
679
680 asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq);
681 PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS);
682 PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE);
683 PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD);
684 PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR);
685 PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS);
686 PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE);
687 PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT);
688 PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES);
689 PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES);
690 PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_TIMEOUT);
691 PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_DOWN_COUNT);
692
693 asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq);
694 PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0);
695 PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1);
696
697 asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq);
698 PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0);
699 PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1);
700
701 asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq);
702 PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT);
703 PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT);
704 PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT);
705
706 asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq);
707 PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK);
708 PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT);
709 PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK);
710 PRINT_LMIP_word(asd_ha, lseq, TAG_MASK);
711 PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG);
712 PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET);
713
714 asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq);
715 PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS);
716 PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS);
717 PRINT_LMIP_word(asd_ha, lseq, SDB_DDB);
718 PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS);
719 PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG);
720
721 asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq);
722 PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME);
723 PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS);
724 PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS);
725 PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL);
726
727 asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq);
728 PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS);
729 PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS);
730 PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS);
731 PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS);
732 PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS);
733
734 asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq);
735 PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS);
736 PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS);
737 PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS);
738 PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
739}
740
741/**
742 * asd_dump_target_ddb -- dump a CSEQ target DDB site
743 * @asd_ha: pointer to host adapter structure
744 * @site_no: site number of interest
745 */
746void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
747{
748 if (site_no >= asd_ha->hw_prof.max_ddbs)
749 return;
750
751#define DDB_FIELDB(__name) \
752 asd_ddbsite_read_byte(asd_ha, site_no, \
753 offsetof(struct asd_ddb_ssp_smp_target_port, __name))
754#define DDB2_FIELDB(__name) \
755 asd_ddbsite_read_byte(asd_ha, site_no, \
756 offsetof(struct asd_ddb_stp_sata_target_port, __name))
757#define DDB_FIELDW(__name) \
758 asd_ddbsite_read_word(asd_ha, site_no, \
759 offsetof(struct asd_ddb_ssp_smp_target_port, __name))
760
761#define DDB_FIELDD(__name) \
762 asd_ddbsite_read_dword(asd_ha, site_no, \
763 offsetof(struct asd_ddb_ssp_smp_target_port, __name))
764
765 asd_printk("DDB: 0x%02x\n", site_no);
766 asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
767 asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
768 asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
769 asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
770 asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
771 asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
772 asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
773 asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
774 asd_printk("Pathway Blocked Count: 0x%02x\n",
775 DDB_FIELDB(pathway_blocked_count));
776 asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
777 asd_printk("more_compat_features: 0x%08x\n",
778 DDB_FIELDD(more_compat_features));
779 asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
780 asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
781 asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
782 asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
783 asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
784 asd_printk("Active Task Count: 0x%04x\n",
785 DDB_FIELDW(active_task_count));
786 asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
787 asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
788 asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
789}
790
791void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
792{
793#define DDB0_FIELDB(__name) \
794 asd_ddbsite_read_byte(asd_ha, 0, \
795 offsetof(struct asd_ddb_seq_shared, __name))
796#define DDB0_FIELDW(__name) \
797 asd_ddbsite_read_word(asd_ha, 0, \
798 offsetof(struct asd_ddb_seq_shared, __name))
799
800#define DDB0_FIELDD(__name) \
801 asd_ddbsite_read_dword(asd_ha,0 , \
802 offsetof(struct asd_ddb_seq_shared, __name))
803
804#define DDB0_FIELDA(__name, _o) \
805 asd_ddbsite_read_byte(asd_ha, 0, \
806 offsetof(struct asd_ddb_seq_shared, __name)+_o)
807
808
809 asd_printk("DDB: 0\n");
810 asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
811 asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
812 asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
813 asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
814 asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
815 asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
816 asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
817 asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
818 asd_printk("est_nexus_buf_thresh:%04x\n",
819 DDB0_FIELDW(est_nexus_buf_thresh));
820 asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
821 asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
822 asd_printk("port_map_by_links:%02x %02x %02x %02x "
823 "%02x %02x %02x %02x\n",
824 DDB0_FIELDA(port_map_by_links, 0),
825 DDB0_FIELDA(port_map_by_links, 1),
826 DDB0_FIELDA(port_map_by_links, 2),
827 DDB0_FIELDA(port_map_by_links, 3),
828 DDB0_FIELDA(port_map_by_links, 4),
829 DDB0_FIELDA(port_map_by_links, 5),
830 DDB0_FIELDA(port_map_by_links, 6),
831 DDB0_FIELDA(port_map_by_links, 7));
832}
833
834static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
835{
836
837#define SCB_FIELDB(__name) \
838 asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
839 + offsetof(struct initiate_ssp_task, __name))
840#define SCB_FIELDW(__name) \
841 asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
842 + offsetof(struct initiate_ssp_task, __name))
843#define SCB_FIELDD(__name) \
844 asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
845 + offsetof(struct initiate_ssp_task, __name))
846
847 asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
848 asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
849 asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
850 asd_printk("Target Port Xfer Tag: 0x%04x.\n",
851 SCB_FIELDW(ssp_frame.tptt));
852 asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
853 asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
854}
855
856/**
857 * asd_dump_scb_sites -- dump currently used CSEQ SCB sites
858 * @asd_ha: pointer to host adapter struct
859 */
860void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
861{
862 u16 site_no;
863
864 for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
865 u8 opcode;
866
867 if (!SCB_SITE_VALID(site_no))
868 continue;
869
870 /* We are only interested in SCB sites currently used.
871 */
872 opcode = asd_scbsite_read_byte(asd_ha, site_no,
873 offsetof(struct scb_header,
874 opcode));
875 if (opcode == 0xFF)
876 continue;
877
878 asd_printk("\nSCB: 0x%x\n", site_no);
879 asd_dump_scb_site(asd_ha, site_no);
880 }
881}
882
883/**
884 * asd_dump_seq_state -- dump CSEQ and LSEQ states
885 * @asd_ha: pointer to host adapter structure
886 * @lseq_mask: mask of LSEQs of interest
887 */
888void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask)
889{
890 int lseq;
891
892 asd_dump_cseq_state(asd_ha);
893
894 if (lseq_mask != 0)
895 for_each_sequencer(lseq_mask, lseq_mask, lseq)
896 asd_dump_lseq_state(asd_ha, lseq);
897}
898
899void asd_dump_frame_rcvd(struct asd_phy *phy,
900 struct done_list_struct *dl)
901{
902 unsigned long flags;
903 int i;
904
905 switch ((dl->status_block[1] & 0x70) >> 3) {
906 case SAS_PROTO_STP:
907 ASD_DPRINTK("STP proto device-to-host FIS:\n");
908 break;
909 default:
910 case SAS_PROTO_SSP:
911 ASD_DPRINTK("SAS proto IDENTIFY:\n");
912 break;
913 }
914 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
915 for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4)
916 ASD_DPRINTK("%02x: %02x %02x %02x %02x\n",
917 i,
918 phy->frame_rcvd[i],
919 phy->frame_rcvd[i+1],
920 phy->frame_rcvd[i+2],
921 phy->frame_rcvd[i+3]);
922 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
923}
924
925static inline void asd_dump_scb(struct asd_ascb *ascb, int ind)
926{
927 asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
928 "index:%d, opcode:0x%02x\n",
929 ind, ascb->dma_scb.vaddr,
930 (unsigned long long)ascb->dma_scb.dma_handle,
931 (unsigned long long)
932 le64_to_cpu(ascb->scb->header.next_scb),
933 le16_to_cpu(ascb->scb->header.index),
934 ascb->scb->header.opcode);
935}
936
937void asd_dump_scb_list(struct asd_ascb *ascb, int num)
938{
939 int i = 0;
940
941 asd_printk("dumping %d scbs:\n", num);
942
943 asd_dump_scb(ascb, i++);
944 --num;
945
946 if (num > 0 && !list_empty(&ascb->list)) {
947 struct list_head *el;
948
949 list_for_each(el, &ascb->list) {
950 struct asd_ascb *s = list_entry(el, struct asd_ascb,
951 list);
952 asd_dump_scb(s, i++);
953 if (--num <= 0)
954 break;
955 }
956 }
957}
958
959#endif /* ASD_DEBUG */
diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h
new file mode 100644
index 000000000000..0c388e7da6bb
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_dump.h
@@ -0,0 +1,52 @@
1/*
2 * Aic94xx SAS/SATA driver dump header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_DUMP_H_
28#define _AIC94XX_DUMP_H_
29
30#ifdef ASD_DEBUG
31
32void asd_dump_ddb_0(struct asd_ha_struct *asd_ha);
33void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no);
34void asd_dump_scb_sites(struct asd_ha_struct *asd_ha);
35void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask);
36void asd_dump_frame_rcvd(struct asd_phy *phy,
37 struct done_list_struct *dl);
38void asd_dump_scb_list(struct asd_ascb *ascb, int num);
39#else /* ASD_DEBUG */
40
41static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { }
42static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha,
43 u16 site_no) { }
44static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { }
45static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha,
46 u8 lseq_mask) { }
47static inline void asd_dump_frame_rcvd(struct asd_phy *phy,
48 struct done_list_struct *dl) { }
49static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { }
50#endif /* ASD_DEBUG */
51
52#endif /* _AIC94XX_DUMP_H_ */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
new file mode 100644
index 000000000000..a24201351108
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -0,0 +1,1376 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28#include <linux/delay.h>
29#include <linux/module.h>
30
31#include "aic94xx.h"
32#include "aic94xx_reg.h"
33#include "aic94xx_hwi.h"
34#include "aic94xx_seq.h"
35#include "aic94xx_dump.h"
36
37u32 MBAR0_SWB_SIZE;
38
39/* ---------- Initialization ---------- */
40
41static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
42{
43 extern char sas_addr_str[];
44 /* If the user has specified a WWN it overrides other settings
45 */
46 if (sas_addr_str[0] != '\0')
47 asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr,
48 sas_addr_str);
49 else if (asd_ha->hw_prof.sas_addr[0] != 0)
50 asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr);
51}
52
53static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha)
54{
55 int i;
56
57 for (i = 0; i < ASD_MAX_PHYS; i++) {
58 if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0)
59 continue;
60 /* Set a phy's address only if it has none.
61 */
62 ASD_DPRINTK("setting phy%d addr to %llx\n", i,
63 SAS_ADDR(asd_ha->hw_prof.sas_addr));
64 memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr,
65 asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
66 }
67}
68
69/* ---------- PHY initialization ---------- */
70
71static void asd_init_phy_identify(struct asd_phy *phy)
72{
73 phy->identify_frame = phy->id_frm_tok->vaddr;
74
75 memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
76
77 phy->identify_frame->dev_type = SAS_END_DEV;
78 if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
79 phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
80 if (phy->sas_phy.role & PHY_ROLE_TARGET)
81 phy->identify_frame->target_bits = phy->sas_phy.tproto;
82 memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr,
83 SAS_ADDR_SIZE);
84 phy->identify_frame->phy_id = phy->sas_phy.id;
85}
86
87static int asd_init_phy(struct asd_phy *phy)
88{
89 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
90 struct asd_sas_phy *sas_phy = &phy->sas_phy;
91
92 sas_phy->enabled = 1;
93 sas_phy->class = SAS;
94 sas_phy->iproto = SAS_PROTO_ALL;
95 sas_phy->tproto = 0;
96 sas_phy->type = PHY_TYPE_PHYSICAL;
97 sas_phy->role = PHY_ROLE_INITIATOR;
98 sas_phy->oob_mode = OOB_NOT_CONNECTED;
99 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
100
101 phy->id_frm_tok = asd_alloc_coherent(asd_ha,
102 sizeof(*phy->identify_frame),
103 GFP_KERNEL);
104 if (!phy->id_frm_tok) {
105 asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id);
106 return -ENOMEM;
107 } else
108 asd_init_phy_identify(phy);
109
110 memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd));
111
112 return 0;
113}
114
115static int asd_init_phys(struct asd_ha_struct *asd_ha)
116{
117 u8 i;
118 u8 phy_mask = asd_ha->hw_prof.enabled_phys;
119
120 for (i = 0; i < ASD_MAX_PHYS; i++) {
121 struct asd_phy *phy = &asd_ha->phys[i];
122
123 phy->phy_desc = &asd_ha->hw_prof.phy_desc[i];
124
125 phy->sas_phy.enabled = 0;
126 phy->sas_phy.id = i;
127 phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0];
128 phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0];
129 phy->sas_phy.ha = &asd_ha->sas_ha;
130 phy->sas_phy.lldd_phy = phy;
131 }
132
133 /* Now enable and initialize only the enabled phys. */
134 for_each_phy(phy_mask, phy_mask, i) {
135 int err = asd_init_phy(&asd_ha->phys[i]);
136 if (err)
137 return err;
138 }
139
140 return 0;
141}
142
143/* ---------- Sliding windows ---------- */
144
145static int asd_init_sw(struct asd_ha_struct *asd_ha)
146{
147 struct pci_dev *pcidev = asd_ha->pcidev;
148 int err;
149 u32 v;
150
151 /* Unlock MBARs */
152 err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v);
153 if (err) {
154 asd_printk("couldn't access conf. space of %s\n",
155 pci_name(pcidev));
156 goto Err;
157 }
158 if (v)
159 err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v);
160 if (err) {
161 asd_printk("couldn't write to MBAR_KEY of %s\n",
162 pci_name(pcidev));
163 goto Err;
164 }
165
166 /* Set sliding windows A, B and C to point to proper internal
167 * memory regions.
168 */
169 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR);
170 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB,
171 REG_BASE_ADDR_CSEQCIO);
172 pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI);
173 asd_ha->io_handle[0].swa_base = REG_BASE_ADDR;
174 asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO;
175 asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI;
176 MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80;
177 if (!asd_ha->iospace) {
178 /* MBAR1 will point to OCM (On Chip Memory) */
179 pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR);
180 asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR;
181 }
182 spin_lock_init(&asd_ha->iolock);
183Err:
184 return err;
185}
186
187/* ---------- SCB initialization ---------- */
188
189/**
190 * asd_init_scbs - manually allocate the first SCB.
191 * @asd_ha: pointer to host adapter structure
192 *
193 * This allocates the very first SCB which would be sent to the
194 * sequencer for execution. Its bus address is written to
195 * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of
196 * the _next_ scb to be DMA-ed to the host adapter is read from the last
197 * SCB DMA-ed to the host adapter, we have to always stay one step
198 * ahead of the sequencer and keep one SCB already allocated.
199 */
200static int asd_init_scbs(struct asd_ha_struct *asd_ha)
201{
202 struct asd_seq_data *seq = &asd_ha->seq;
203 int bitmap_bytes;
204
205 /* allocate the index array and bitmap */
206 asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs;
207 asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits*
208 sizeof(void *), GFP_KERNEL);
209 if (!asd_ha->seq.tc_index_array)
210 return -ENOMEM;
211
212 bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
213 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
214 asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
215 if (!asd_ha->seq.tc_index_bitmap)
216 return -ENOMEM;
217
218 spin_lock_init(&seq->tc_index_lock);
219
220 seq->next_scb.size = sizeof(struct scb);
221 seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL,
222 &seq->next_scb.dma_handle);
223 if (!seq->next_scb.vaddr) {
224 kfree(asd_ha->seq.tc_index_bitmap);
225 kfree(asd_ha->seq.tc_index_array);
226 asd_ha->seq.tc_index_bitmap = NULL;
227 asd_ha->seq.tc_index_array = NULL;
228 return -ENOMEM;
229 }
230
231 seq->pending = 0;
232 spin_lock_init(&seq->pend_q_lock);
233 INIT_LIST_HEAD(&seq->pend_q);
234
235 return 0;
236}
237
238static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha)
239{
240 asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE;
241 asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE;
242 ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n",
243 asd_ha->hw_prof.max_scbs,
244 asd_ha->hw_prof.max_ddbs);
245}
246
247/* ---------- Done List initialization ---------- */
248
249static void asd_dl_tasklet_handler(unsigned long);
250
251static int asd_init_dl(struct asd_ha_struct *asd_ha)
252{
253 asd_ha->seq.actual_dl
254 = asd_alloc_coherent(asd_ha,
255 ASD_DL_SIZE * sizeof(struct done_list_struct),
256 GFP_KERNEL);
257 if (!asd_ha->seq.actual_dl)
258 return -ENOMEM;
259 asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr;
260 asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE;
261 asd_ha->seq.dl_next = 0;
262 tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler,
263 (unsigned long) asd_ha);
264
265 return 0;
266}
267
268/* ---------- EDB and ESCB init ---------- */
269
270static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, unsigned int gfp_flags)
271{
272 struct asd_seq_data *seq = &asd_ha->seq;
273 int i;
274
275 seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags);
276 if (!seq->edb_arr)
277 return -ENOMEM;
278
279 for (i = 0; i < seq->num_edbs; i++) {
280 seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE,
281 gfp_flags);
282 if (!seq->edb_arr[i])
283 goto Err_unroll;
284 memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE);
285 }
286
287 ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs);
288
289 return 0;
290
291Err_unroll:
292 for (i-- ; i >= 0; i--)
293 asd_free_coherent(asd_ha, seq->edb_arr[i]);
294 kfree(seq->edb_arr);
295 seq->edb_arr = NULL;
296
297 return -ENOMEM;
298}
299
300static int asd_alloc_escbs(struct asd_ha_struct *asd_ha,
301 unsigned int gfp_flags)
302{
303 struct asd_seq_data *seq = &asd_ha->seq;
304 struct asd_ascb *escb;
305 int i, escbs;
306
307 seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr),
308 gfp_flags);
309 if (!seq->escb_arr)
310 return -ENOMEM;
311
312 escbs = seq->num_escbs;
313 escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags);
314 if (!escb) {
315 asd_printk("couldn't allocate list of escbs\n");
316 goto Err;
317 }
318 seq->num_escbs -= escbs; /* subtract what was not allocated */
319 ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs);
320
321 for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next,
322 struct asd_ascb,
323 list)) {
324 seq->escb_arr[i] = escb;
325 escb->scb->header.opcode = EMPTY_SCB;
326 }
327
328 return 0;
329Err:
330 kfree(seq->escb_arr);
331 seq->escb_arr = NULL;
332 return -ENOMEM;
333
334}
335
336static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha)
337{
338 struct asd_seq_data *seq = &asd_ha->seq;
339 int i, k, z = 0;
340
341 for (i = 0; i < seq->num_escbs; i++) {
342 struct asd_ascb *ascb = seq->escb_arr[i];
343 struct empty_scb *escb = &ascb->scb->escb;
344
345 ascb->edb_index = z;
346
347 escb->num_valid = ASD_EDBS_PER_SCB;
348
349 for (k = 0; k < ASD_EDBS_PER_SCB; k++) {
350 struct sg_el *eb = &escb->eb[k];
351 struct asd_dma_tok *edb = seq->edb_arr[z++];
352
353 memset(eb, 0, sizeof(*eb));
354 eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle));
355 eb->size = cpu_to_le32(((u32) edb->size));
356 }
357 }
358}
359
360/**
361 * asd_init_escbs -- allocate and initialize empty scbs
362 * @asd_ha: pointer to host adapter structure
363 *
364 * An empty SCB carries a scatter-gather list of ASD_EDBS_PER_SCB (7) buffers.
365 * They transport sense data, etc.
366 */
367static int asd_init_escbs(struct asd_ha_struct *asd_ha)
368{
369 struct asd_seq_data *seq = &asd_ha->seq;
370 int err = 0;
371
372 /* Allocate two empty data buffers (edb) per sequencer. */
373 int edbs = 2*(1+asd_ha->hw_prof.num_phys);
374
375 seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB;
376 seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB;
377
378 err = asd_alloc_edbs(asd_ha, GFP_KERNEL);
379 if (err) {
380 asd_printk("couldn't allocate edbs\n");
381 return err;
382 }
383
384 err = asd_alloc_escbs(asd_ha, GFP_KERNEL);
385 if (err) {
386 asd_printk("couldn't allocate escbs\n");
387 return err;
388 }
389
390 asd_assign_edbs2escbs(asd_ha);
391	/* In order to ensure that normal SCBs do not overfill sequencer
392 * memory and leave no space for escbs (halting condition),
393 * we increment pending here by the number of escbs. However,
394 * escbs are never pending.
395 */
396 seq->pending = seq->num_escbs;
397 seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2;
398
399 return 0;
400}
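/* Illustrative sizing only (the numbers are assumed, not taken from
 * hardware documentation): with ASD_EDBS_PER_SCB equal to 7 and
 * hw_prof.num_phys equal to 8, asd_init_escbs() above computes
 * edbs = 2 * (1 + 8) = 18, rounds up to num_escbs = (18 + 7 - 1) / 7 = 3
 * empty SCBs, and then allocates num_edbs = 3 * 7 = 21 empty data
 * buffers, i.e. a few more EDBs than the strict minimum.
 */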
401
402/* ---------- HW initialization ---------- */
403
404/**
405 * asd_chip_hardrst -- hard reset the chip
406 * @asd_ha: pointer to host adapter structure
407 *
408 * This takes 16 cycles and is synchronous to CFCLK, which runs
409 * at 200 MHz, so this should take at most 80 nanoseconds.
410 */
411int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
412{
413 int i;
414 int count = 100;
415 u32 reg;
416
417 for (i = 0 ; i < 4 ; i++) {
418 asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
419 }
420
421 do {
422 udelay(1);
423 reg = asd_read_reg_dword(asd_ha, CHIMINT);
424 if (reg & HARDRSTDET) {
425 asd_write_reg_dword(asd_ha, CHIMINT,
426 HARDRSTDET|PORRSTDET);
427 return 0;
428 }
429 } while (--count > 0);
430
431 return -ENODEV;
432}
433
434/**
435 * asd_init_chip -- initialize the chip
436 * @asd_ha: pointer to host adapter structure
437 *
438 * Hard resets the chip, disables HA interrupts, downloads the sequencer
439 * microcode and starts the sequencers. The caller has to explicitly
440 * enable HA interrupts with asd_enable_ints(asd_ha).
441 */
442static int asd_init_chip(struct asd_ha_struct *asd_ha)
443{
444 int err;
445
446 err = asd_chip_hardrst(asd_ha);
447 if (err) {
448 asd_printk("couldn't hard reset %s\n",
449 pci_name(asd_ha->pcidev));
450 goto out;
451 }
452
453 asd_disable_ints(asd_ha);
454
455 err = asd_init_seqs(asd_ha);
456 if (err) {
457 asd_printk("couldn't init seqs for %s\n",
458 pci_name(asd_ha->pcidev));
459 goto out;
460 }
461
462 err = asd_start_seqs(asd_ha);
463 if (err) {
464		asd_printk("couldn't start seqs for %s\n",
465 pci_name(asd_ha->pcidev));
466 goto out;
467 }
468out:
469 return err;
470}
471
472#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
473
474static int max_devs = 0;
475module_param_named(max_devs, max_devs, int, S_IRUGO);
476MODULE_PARM_DESC(max_devs, "\n"
477 "\tMaximum number of SAS devices to support (not LUs).\n"
478 "\tDefault: 2176, Maximum: 65663.\n");
479
480static int max_cmnds = 0;
481module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
482MODULE_PARM_DESC(max_cmnds, "\n"
483 "\tMaximum number of commands queuable.\n"
484 "\tDefault: 512, Maximum: 66047.\n");
485
486static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
487{
488 unsigned long dma_addr = OCM_BASE_ADDR;
489 u32 d;
490
491 dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
492 asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
493 d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
494 d |= 4;
495 asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
496 asd_ha->hw_prof.max_ddbs += MAX_DEVS;
497}
498
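/* Note on the address arithmetic in the two extension functions below
 * (an interpretation of the code, not from a datasheet): the extension
 * buffer is aligned to the context entry size and its bus address is
 * then biased down by max_ddbs * ASD_DDB_SIZE (or max_scbs *
 * ASD_SCB_SIZE), so that context numbers starting at the internal
 * maximum index directly into the host-memory extension once
 * DEVCTXBASE (or CMDCTXBASE) points at the biased address.
 */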
499static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
500{
501 dma_addr_t dma_handle;
502 unsigned long dma_addr;
503 u32 d;
504 int size;
505
506 asd_extend_devctx_ocm(asd_ha);
507
508 asd_ha->hw_prof.ddb_ext = NULL;
509 if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
510 max_devs = asd_ha->hw_prof.max_ddbs;
511 return 0;
512 }
513
514 size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
515
516 asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
517 if (!asd_ha->hw_prof.ddb_ext) {
518 asd_printk("couldn't allocate memory for %d devices\n",
519 max_devs);
520 max_devs = asd_ha->hw_prof.max_ddbs;
521 return -ENOMEM;
522 }
523 dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
524 dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
525 dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
526 dma_handle = (dma_addr_t) dma_addr;
527 asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
528 d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
529 d &= ~4;
530 asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
531
532 asd_ha->hw_prof.max_ddbs = max_devs;
533
534 return 0;
535}
536
537static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
538{
539 dma_addr_t dma_handle;
540 unsigned long dma_addr;
541 u32 d;
542 int size;
543
544 asd_ha->hw_prof.scb_ext = NULL;
545 if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
546 max_cmnds = asd_ha->hw_prof.max_scbs;
547 return 0;
548 }
549
550 size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
551
552 asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
553 if (!asd_ha->hw_prof.scb_ext) {
554 asd_printk("couldn't allocate memory for %d commands\n",
555 max_cmnds);
556 max_cmnds = asd_ha->hw_prof.max_scbs;
557 return -ENOMEM;
558 }
559 dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
560 dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
561 dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
562 dma_handle = (dma_addr_t) dma_addr;
563 asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
564 d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
565 d &= ~1;
566 asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
567
568 asd_ha->hw_prof.max_scbs = max_cmnds;
569
570 return 0;
571}
572
573/**
574 * asd_init_ctxmem -- initialize context memory
575 * @asd_ha: pointer to host adapter structure
576 *
577 * This function sets the maximum number of SCBs and
578 * DDBs which can be used by the sequencer. This is normally
579 * 512 and 128 respectively. If support for more SCBs or more DDBs
580 * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
581 * initialized here to extend context memory to point to host memory,
582 * thus allowing unlimited support for SCBs and DDBs -- only limited
583 * by host memory.
584 */
585static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
586{
587 int bitmap_bytes;
588
589 asd_get_max_scb_ddb(asd_ha);
590 asd_extend_devctx(asd_ha);
591 asd_extend_cmdctx(asd_ha);
592
593 /* The kernel wants bitmaps to be unsigned long sized. */
594 bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
595 bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
596 asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
597 if (!asd_ha->hw_prof.ddb_bitmap)
598 return -ENOMEM;
599 spin_lock_init(&asd_ha->hw_prof.ddb_lock);
600
601 return 0;
602}
603
604int asd_init_hw(struct asd_ha_struct *asd_ha)
605{
606 int err;
607 u32 v;
608
609 err = asd_init_sw(asd_ha);
610 if (err)
611 return err;
612
613 err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
614 if (err) {
615 asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
616 pci_name(asd_ha->pcidev));
617 return err;
618 }
619	err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
620				     v | SC_TMR_DIS);
621 if (err) {
622 asd_printk("couldn't disable split completion timer of %s\n",
623 pci_name(asd_ha->pcidev));
624 return err;
625 }
626
627 err = asd_read_ocm(asd_ha);
628 if (err) {
629 asd_printk("couldn't read ocm(%d)\n", err);
630		/* While suspicious, it is not an error that we
631 * couldn't read the OCM. */
632 }
633
634 err = asd_read_flash(asd_ha);
635 if (err) {
636 asd_printk("couldn't read flash(%d)\n", err);
637		/* While suspicious, it is not an error that we
638 * couldn't read FLASH memory.
639 */
640 }
641
642 asd_init_ctxmem(asd_ha);
643
644 asd_get_user_sas_addr(asd_ha);
645 if (!asd_ha->hw_prof.sas_addr[0]) {
646 asd_printk("No SAS Address provided for %s\n",
647 pci_name(asd_ha->pcidev));
648 err = -ENODEV;
649 goto Out;
650 }
651
652 asd_propagate_sas_addr(asd_ha);
653
654 err = asd_init_phys(asd_ha);
655 if (err) {
656 asd_printk("couldn't initialize phys for %s\n",
657 pci_name(asd_ha->pcidev));
658 goto Out;
659 }
660
661 err = asd_init_scbs(asd_ha);
662 if (err) {
663 asd_printk("couldn't initialize scbs for %s\n",
664 pci_name(asd_ha->pcidev));
665 goto Out;
666 }
667
668 err = asd_init_dl(asd_ha);
669 if (err) {
670 asd_printk("couldn't initialize the done list:%d\n",
671 err);
672 goto Out;
673 }
674
675 err = asd_init_escbs(asd_ha);
676 if (err) {
677 asd_printk("couldn't initialize escbs\n");
678 goto Out;
679 }
680
681 err = asd_init_chip(asd_ha);
682 if (err) {
683 asd_printk("couldn't init the chip\n");
684 goto Out;
685 }
686Out:
687 return err;
688}
689
690/* ---------- Chip reset ---------- */
691
692/**
693 * asd_chip_reset -- reset the host adapter, etc
694 * @asd_ha: pointer to host adapter structure of interest
695 *
696 * Called from the ISR. Hard reset the chip. Let everything
697 * timeout. This should be no different than hot-unplugging the
698 * host adapter. Once everything times out we'll init the chip with
699 * a call to asd_init_chip() and enable interrupts with asd_enable_ints().
700 * XXX finish.
701 */
702static void asd_chip_reset(struct asd_ha_struct *asd_ha)
703{
704 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
705
706 ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
707 asd_chip_hardrst(asd_ha);
708 sas_ha->notify_ha_event(sas_ha, HAE_RESET);
709}
710
711/* ---------- Done List Routines ---------- */
712
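/* Summary of the done-list handshake implemented below (the
 * hardware-side behaviour is inferred from this code): done-list
 * entries form a ring of ASD_DL_SIZE elements and each entry carries a
 * toggle bit.  The tasklet owns an entry only while its toggle bit
 * matches seq->dl_toggle; once dl_next wraps back to 0 the expected
 * toggle value is flipped, so entries left over from the previous pass
 * around the ring are not processed twice.
 */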
713static void asd_dl_tasklet_handler(unsigned long data)
714{
715 struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data;
716 struct asd_seq_data *seq = &asd_ha->seq;
717 unsigned long flags;
718
719 while (1) {
720 struct done_list_struct *dl = &seq->dl[seq->dl_next];
721 struct asd_ascb *ascb;
722
723 if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle)
724 break;
725
726 /* find the aSCB */
727 spin_lock_irqsave(&seq->tc_index_lock, flags);
728 ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index));
729 spin_unlock_irqrestore(&seq->tc_index_lock, flags);
730 if (unlikely(!ascb)) {
731 ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n");
732 goto next_1;
733 } else if (ascb->scb->header.opcode == EMPTY_SCB) {
734 goto out;
735 } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) {
736 goto next_1;
737 }
738 spin_lock_irqsave(&seq->pend_q_lock, flags);
739 list_del_init(&ascb->list);
740 seq->pending--;
741 spin_unlock_irqrestore(&seq->pend_q_lock, flags);
742 out:
743 ascb->tasklet_complete(ascb, dl);
744
745 next_1:
746 seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1);
747 if (!seq->dl_next)
748 seq->dl_toggle ^= DL_TOGGLE_MASK;
749 }
750}
751
752/* ---------- Interrupt Service Routines ---------- */
753
754/**
755 * asd_process_donelist_isr -- schedule processing of done list entries
756 * @asd_ha: pointer to host adapter structure
757 */
758static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha)
759{
760 tasklet_schedule(&asd_ha->seq.dl_tasklet);
761}
762
763/**
764 * asd_com_sas_isr -- process device communication interrupt (COMINT)
765 * @asd_ha: pointer to host adapter structure
766 */
767static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha)
768{
769 u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT);
770
771 /* clear COMSTAT int */
772 asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF);
773
774 if (comstat & CSBUFPERR) {
775 asd_printk("%s: command/status buffer dma parity error\n",
776 pci_name(asd_ha->pcidev));
777 } else if (comstat & CSERR) {
778 int i;
779 u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
780 dmaerr &= 0xFF;
781 asd_printk("%s: command/status dma error, DMAERR: 0x%02x, "
782 "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n",
783 pci_name(asd_ha->pcidev),
784 dmaerr,
785 asd_read_reg_dword(asd_ha, CSDMAADR),
786 asd_read_reg_dword(asd_ha, CSDMAADR+4));
787 asd_printk("CSBUFFER:\n");
788 for (i = 0; i < 8; i++) {
789 asd_printk("%08x %08x %08x %08x\n",
790 asd_read_reg_dword(asd_ha, CSBUFFER),
791 asd_read_reg_dword(asd_ha, CSBUFFER+4),
792 asd_read_reg_dword(asd_ha, CSBUFFER+8),
793 asd_read_reg_dword(asd_ha, CSBUFFER+12));
794 }
795 asd_dump_seq_state(asd_ha, 0);
796 } else if (comstat & OVLYERR) {
797 u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR);
798 dmaerr = (dmaerr >> 8) & 0xFF;
799 asd_printk("%s: overlay dma error:0x%x\n",
800 pci_name(asd_ha->pcidev),
801 dmaerr);
802 }
803 asd_chip_reset(asd_ha);
804}
805
806static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus)
807{
808 static const char *halt_code[256] = {
809 "UNEXPECTED_INTERRUPT0",
810 "UNEXPECTED_INTERRUPT1",
811 "UNEXPECTED_INTERRUPT2",
812 "UNEXPECTED_INTERRUPT3",
813 "UNEXPECTED_INTERRUPT4",
814 "UNEXPECTED_INTERRUPT5",
815 "UNEXPECTED_INTERRUPT6",
816 "UNEXPECTED_INTERRUPT7",
817 "UNEXPECTED_INTERRUPT8",
818 "UNEXPECTED_INTERRUPT9",
819 "UNEXPECTED_INTERRUPT10",
820 [11 ... 19] = "unknown[11,19]",
821 "NO_FREE_SCB_AVAILABLE",
822 "INVALID_SCB_OPCODE",
823 "INVALID_MBX_OPCODE",
824 "INVALID_ATA_STATE",
825 "ATA_QUEUE_FULL",
826 "ATA_TAG_TABLE_FAULT",
827 "ATA_TAG_MASK_FAULT",
828 "BAD_LINK_QUEUE_STATE",
829 "DMA2CHIM_QUEUE_ERROR",
830 "EMPTY_SCB_LIST_FULL",
831 "unknown[30]",
832 "IN_USE_SCB_ON_FREE_LIST",
833 "BAD_OPEN_WAIT_STATE",
834 "INVALID_STP_AFFILIATION",
835 "unknown[34]",
836 "EXEC_QUEUE_ERROR",
837 "TOO_MANY_EMPTIES_NEEDED",
838 "EMPTY_REQ_QUEUE_ERROR",
839 "Q_MONIRTT_MGMT_ERROR",
840 "TARGET_MODE_FLOW_ERROR",
841 "DEVICE_QUEUE_NOT_FOUND",
842 "START_IRTT_TIMER_ERROR",
843 "ABORT_TASK_ILLEGAL_REQ",
844 [43 ... 255] = "unknown[43,255]"
845 };
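	/* Indexing note, derived from the uses below rather than from a
	 * datasheet: the halt code is taken from bits 16..23 of the
	 * CSEQ/LSEQ ARP2INT register, i.e. halt_code[(arp2int >> 16) & 0xFF].
	 */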
846
847 if (dchstatus & CSEQINT) {
848 u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT);
849
850 if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) {
851 asd_printk("%s: CSEQ arp2int:0x%x\n",
852 pci_name(asd_ha->pcidev),
853 arp2int);
854 } else if (arp2int & ARP2HALTC)
855 asd_printk("%s: CSEQ halted: %s\n",
856 pci_name(asd_ha->pcidev),
857 halt_code[(arp2int>>16)&0xFF]);
858 else
859 asd_printk("%s: CARP2INT:0x%x\n",
860 pci_name(asd_ha->pcidev),
861 arp2int);
862 }
863 if (dchstatus & LSEQINT_MASK) {
864 int lseq;
865 u8 lseq_mask = dchstatus & LSEQINT_MASK;
866
867 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
868 u32 arp2int = asd_read_reg_dword(asd_ha,
869 LmARP2INT(lseq));
870 if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR
871 | ARP2CIOPERR)) {
872 asd_printk("%s: LSEQ%d arp2int:0x%x\n",
873 pci_name(asd_ha->pcidev),
874 lseq, arp2int);
875 /* XXX we should only do lseq reset */
876 } else if (arp2int & ARP2HALTC)
877 asd_printk("%s: LSEQ%d halted: %s\n",
878 pci_name(asd_ha->pcidev),
879					   lseq, halt_code[(arp2int>>16)&0xFF]);
880 else
881 asd_printk("%s: LSEQ%d ARP2INT:0x%x\n",
882 pci_name(asd_ha->pcidev), lseq,
883 arp2int);
884 }
885 }
886 asd_chip_reset(asd_ha);
887}
888
889/**
890 * asd_dch_sas_isr -- process device channel interrupt (DEVINT)
891 * @asd_ha: pointer to host adapter structure
892 */
893static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
894{
895 u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS);
896
897 if (dchstatus & CFIFTOERR) {
898 asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev));
899 asd_chip_reset(asd_ha);
900 } else
901 asd_arp2_err(asd_ha, dchstatus);
902}
903
904/**
905 * asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
906 * @asd_ha: pointer to host adapter structure
907 */
908static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
909{
910 u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R);
911
912 if (!(stat0r & ASIERR)) {
913 asd_printk("hmm, EXSI interrupted but no error?\n");
914 return;
915 }
916
917 if (stat0r & ASIFMTERR) {
918 asd_printk("ASI SEEPROM format error for %s\n",
919 pci_name(asd_ha->pcidev));
920 } else if (stat0r & ASISEECHKERR) {
921 u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R);
922 asd_printk("ASI SEEPROM checksum 0x%x error for %s\n",
923 stat1r & CHECKSUM_MASK,
924 pci_name(asd_ha->pcidev));
925 } else {
926 u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR);
927
928 if (!(statr & CPI2ASIMSTERR_MASK)) {
929 ASD_DPRINTK("hmm, ASIERR?\n");
930 return;
931 } else {
932 u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR);
933 u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR);
934
935 asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, "
936 "count: 0x%x, byteen: 0x%x, targerr: 0x%x "
937 "master id: 0x%x, master err: 0x%x\n",
938 pci_name(asd_ha->pcidev),
939 addr, data,
940 (statr & CPI2ASIBYTECNT_MASK) >> 16,
941 (statr & CPI2ASIBYTEEN_MASK) >> 12,
942 (statr & CPI2ASITARGERR_MASK) >> 8,
943 (statr & CPI2ASITARGMID_MASK) >> 4,
944 (statr & CPI2ASIMSTERR_MASK));
945 }
946 }
947 asd_chip_reset(asd_ha);
948}
949
950/**
951 * asd_hst_pcix_isr -- process host interface interrupts
952 * @asd_ha: pointer to host adapter structure
953 *
954 * Asserted on PCIX errors: target abort, etc.
955 */
956static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha)
957{
958 u16 status;
959 u32 pcix_status;
960 u32 ecc_status;
961
962 pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status);
963 pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status);
964 pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status);
965
966 if (status & PCI_STATUS_DETECTED_PARITY)
967 asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev));
968 else if (status & PCI_STATUS_REC_MASTER_ABORT)
969 asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev));
970 else if (status & PCI_STATUS_REC_TARGET_ABORT)
971 asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev));
972 else if (status & PCI_STATUS_PARITY)
973 asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev));
974 else if (pcix_status & RCV_SCE) {
975 asd_printk("received split completion error for %s\n",
976 pci_name(asd_ha->pcidev));
977 pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
978 /* XXX: Abort task? */
979 return;
980 } else if (pcix_status & UNEXP_SC) {
981 asd_printk("unexpected split completion for %s\n",
982 pci_name(asd_ha->pcidev));
983 pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status);
984 /* ignore */
985 return;
986 } else if (pcix_status & SC_DISCARD)
987 asd_printk("split completion discarded for %s\n",
988 pci_name(asd_ha->pcidev));
989 else if (ecc_status & UNCOR_ECCERR)
990 asd_printk("uncorrectable ECC error for %s\n",
991 pci_name(asd_ha->pcidev));
992 asd_chip_reset(asd_ha);
993}
994
995/**
996 * asd_hw_isr -- host adapter interrupt service routine
997 * @irq: ignored
998 * @dev_id: pointer to host adapter structure
999 * @regs: ignored
1000 *
1001 * The ISR processes done list entries and level 3 error handling.
1002 */
1003irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs)
1004{
1005 struct asd_ha_struct *asd_ha = dev_id;
1006 u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT);
1007
1008 if (!chimint)
1009 return IRQ_NONE;
1010
1011 asd_write_reg_dword(asd_ha, CHIMINT, chimint);
1012 (void) asd_read_reg_dword(asd_ha, CHIMINT);
1013
1014 if (chimint & DLAVAIL)
1015 asd_process_donelist_isr(asd_ha);
1016 if (chimint & COMINT)
1017 asd_com_sas_isr(asd_ha);
1018 if (chimint & DEVINT)
1019 asd_dch_sas_isr(asd_ha);
1020 if (chimint & INITERR)
1021 asd_rbi_exsi_isr(asd_ha);
1022 if (chimint & HOSTERR)
1023 asd_hst_pcix_isr(asd_ha);
1024
1025 return IRQ_HANDLED;
1026}
1027
1028/* ---------- SCB handling ---------- */
1029
1030static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
1031 unsigned int gfp_flags)
1032{
1033 extern kmem_cache_t *asd_ascb_cache;
1034 struct asd_seq_data *seq = &asd_ha->seq;
1035 struct asd_ascb *ascb;
1036 unsigned long flags;
1037
1038 ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags);
1039
1040 if (ascb) {
1041 memset(ascb, 0, sizeof(*ascb));
1042 ascb->dma_scb.size = sizeof(struct scb);
1043 ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool,
1044 gfp_flags,
1045 &ascb->dma_scb.dma_handle);
1046 if (!ascb->dma_scb.vaddr) {
1047 kmem_cache_free(asd_ascb_cache, ascb);
1048 return NULL;
1049 }
1050 memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb));
1051 asd_init_ascb(asd_ha, ascb);
1052
1053 spin_lock_irqsave(&seq->tc_index_lock, flags);
1054 ascb->tc_index = asd_tc_index_get(seq, ascb);
1055 spin_unlock_irqrestore(&seq->tc_index_lock, flags);
1056 if (ascb->tc_index == -1)
1057 goto undo;
1058
1059 ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index);
1060 }
1061
1062 return ascb;
1063undo:
1064 dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
1065 ascb->dma_scb.dma_handle);
1066 kmem_cache_free(asd_ascb_cache, ascb);
1067 ASD_DPRINTK("no index for ascb\n");
1068 return NULL;
1069}
1070
1071/**
1072 * asd_ascb_alloc_list -- allocate a list of aSCBs
1073 * @asd_ha: pointer to host adapter structure
1074 * @num: pointer to integer number of aSCBs
1075 * @gfp_flags: GFP_ flags.
1076 *
1077 * This is the only function which is used to allocate aSCBs.
1078 * It can allocate one or many. If more than one, then they form
1079 * a linked list in two ways: by their list field of the ascb struct
1080 * and by the next_scb field of the scb_header.
1081 *
1082 * Returns NULL if no memory was available, else a pointer to a list
1083 * of ascbs. When this function returns, @num is the number of SCBs
1084 * which could not be allocated, or 0 if all requested SCBs were
1085 * allocated.
1086 */
1087struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
1088 *asd_ha, int *num,
1089 unsigned int gfp_flags)
1090{
1091 struct asd_ascb *first = NULL;
1092
1093 for ( ; *num > 0; --*num) {
1094 struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags);
1095
1096 if (!ascb)
1097 break;
1098 else if (!first)
1099 first = ascb;
1100 else {
1101 struct asd_ascb *last = list_entry(first->list.prev,
1102 struct asd_ascb,
1103 list);
1104 list_add_tail(&ascb->list, &first->list);
1105 last->scb->header.next_scb =
1106 cpu_to_le64(((u64)ascb->dma_scb.dma_handle));
1107 }
1108 }
1109
1110 return first;
1111}
1112
1113/**
1114 * asd_swap_head_scb -- swap the head scb
1115 * @asd_ha: pointer to host adapter structure
1116 * @ascb: pointer to the head of an ascb list
1117 *
1118 * The sequencer knows the DMA address of the next SCB to be DMAed to
1119 * the host adapter, from initialization or from the last list DMAed.
1120 * seq->next_scb keeps the address of this SCB. The sequencer will
1121 * DMA to the host adapter this list of SCBs. But the head (first
1122 * element) of this list is not known to the sequencer. Here we swap
1123 * the head of the list with the known SCB (memcpy()).
1124 * Only one memcpy() is required per list so it is in our interest
1125 * to keep the list of SCBs as long as possible, so that the ratio
1126 * of memcpy() calls to the number of SCBs DMA-ed is as small
1127 * as possible.
1128 *
1129 * LOCKING: called with the pending list lock held.
1130 */
1131static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
1132 struct asd_ascb *ascb)
1133{
1134 struct asd_seq_data *seq = &asd_ha->seq;
1135 struct asd_ascb *last = list_entry(ascb->list.prev,
1136 struct asd_ascb,
1137 list);
1138 struct asd_dma_tok t = ascb->dma_scb;
1139
1140 memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb));
1141 ascb->dma_scb = seq->next_scb;
1142 ascb->scb = ascb->dma_scb.vaddr;
1143 seq->next_scb = t;
1144 last->scb->header.next_scb =
1145 cpu_to_le64(((u64)seq->next_scb.dma_handle));
1146}
1147
1148/**
1149 * asd_start_scb_timers -- (add and) start timers of SCBs
1150 * @list: pointer to struct list_head of the scbs
1151 * @to: timeout in jiffies
1152 *
1153 * If an SCB in the @list has no timer function, assign the default
1154 * one, then start the timer of the SCB. This function is
1155 * intended to be called from asd_post_ascb_list(), just prior to
1156 * posting the SCBs to the sequencer.
1157 */
1158static inline void asd_start_scb_timers(struct list_head *list)
1159{
1160 struct asd_ascb *ascb;
1161 list_for_each_entry(ascb, list, list) {
1162 if (!ascb->uldd_timer) {
1163 ascb->timer.data = (unsigned long) ascb;
1164 ascb->timer.function = asd_ascb_timedout;
1165 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
1166 add_timer(&ascb->timer);
1167 }
1168 }
1169}
1170
1171/**
1172 * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter
1173 * @asd_ha: pointer to a host adapter structure
1174 * @ascb: pointer to the first aSCB in the list
1175 * @num: number of aSCBs in the list (to be posted)
1176 *
1177 * See queueing comment in asd_post_escb_list().
1178 *
1179 * Additional note on queuing: In order to minimize the ratio of memcpy()
1180 * to the number of ascbs sent, we try to batch-send as many ascbs as possible
1181 * in one go.
1182 * Two cases are possible:
1183 * A) can_queue >= num,
1184 * B) can_queue < num.
1185 * Case A: we can send the whole batch at once. Increment "pending"
1186 * in the beginning of this function, when it is checked, in order to
1187 * eliminate races when this function is called by multiple processes.
1188 * Case B: should never happen if the managing layer considers
1189 * lldd_queue_size.
1190 */
1191int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1192 int num)
1193{
1194 unsigned long flags;
1195 LIST_HEAD(list);
1196 int can_queue;
1197
1198 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1199 can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending;
1200 if (can_queue >= num)
1201 asd_ha->seq.pending += num;
1202 else
1203 can_queue = 0;
1204
1205 if (!can_queue) {
1206 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1207 asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev));
1208 return -SAS_QUEUE_FULL;
1209 }
1210
1211 asd_swap_head_scb(asd_ha, ascb);
1212
1213 __list_add(&list, ascb->list.prev, &ascb->list);
1214
1215 asd_start_scb_timers(&list);
1216
1217 asd_ha->seq.scbpro += num;
1218 list_splice_init(&list, asd_ha->seq.pend_q.prev);
1219 asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1220 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1221
1222 return 0;
1223}
1224
1225/**
1226 * asd_post_escb_list -- post a list of 1 or more empty scb
1227 * @asd_ha: pointer to a host adapter structure
1228 * @ascb: pointer to the first empty SCB in the list
1229 * @num: number of aSCBs in the list (to be posted)
1230 *
1231 * This is essentially the same as asd_post_ascb_list, but we do not
1232 * increment pending, add those to the pending list or get indexes.
1233 * See asd_init_escbs() and asd_init_post_escbs().
1234 *
1235 * Since sending a list of ascbs is a superset of sending a single
1236 * ascb, this function exists to generalize this. More specifically,
1237 * when sending a list of those, we want to do only a _single_
1238 * memcpy() at swap head, as opposed to for each ascb sent (in the
1239 * case of sending them one by one). That is, we want to minimize the
1240 * ratio of memcpy() operations to the number of ascbs sent. The same
1241 * logic applies to asd_post_ascb_list().
1242 */
1243int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1244 int num)
1245{
1246 unsigned long flags;
1247
1248 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
1249 asd_swap_head_scb(asd_ha, ascb);
1250 asd_ha->seq.scbpro += num;
1251 asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro);
1252 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
1253
1254 return 0;
1255}
1256
1257/* ---------- LED ---------- */
1258
1259/**
1260 * asd_turn_led -- turn on/off an LED
1261 * @asd_ha: pointer to host adapter structure
1262 * @phy_id: the PHY id whose LED we want to manipulate
1263 * @op: 1 to turn on, 0 to turn off
1264 */
1265void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1266{
1267 if (phy_id < ASD_MAX_PHYS) {
1268 u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id));
1269 if (op)
1270 v |= LEDPOL;
1271 else
1272 v &= ~LEDPOL;
1273 asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v);
1274 }
1275}
1276
1277/**
1278 * asd_control_led -- enable/disable an LED on the board
1279 * @asd_ha: pointer to host adapter structure
1280 * @phy_id: integer, the phy id
1281 * @op: integer, 1 to enable, 0 to disable the LED
1282 *
1283 * First we enable the LED's GPIO output, then we set the source
1284 * to be an external module.
1285 */
1286void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op)
1287{
1288 if (phy_id < ASD_MAX_PHYS) {
1289 u32 v;
1290
1291 v = asd_read_reg_dword(asd_ha, GPIOOER);
1292 if (op)
1293 v |= (1 << phy_id);
1294 else
1295 v &= ~(1 << phy_id);
1296 asd_write_reg_dword(asd_ha, GPIOOER, v);
1297
1298 v = asd_read_reg_dword(asd_ha, GPIOCNFGR);
1299 if (op)
1300 v |= (1 << phy_id);
1301 else
1302 v &= ~(1 << phy_id);
1303 asd_write_reg_dword(asd_ha, GPIOCNFGR, v);
1304 }
1305}
1306
1307/* ---------- PHY enable ---------- */
1308
1309static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id)
1310{
1311 struct asd_phy *phy = &asd_ha->phys[phy_id];
1312
1313 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0);
1314 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY),
1315 HOTPLUG_DELAY_TIMEOUT);
1316
1317 /* Get defaults from manuf. sector */
1318 /* XXX we need defaults for those in case MS is broken. */
1319 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0),
1320 phy->phy_desc->phy_control_0);
1321 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1),
1322 phy->phy_desc->phy_control_1);
1323 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2),
1324 phy->phy_desc->phy_control_2);
1325 asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3),
1326 phy->phy_desc->phy_control_3);
1327
1328 asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id),
1329 ASD_COMINIT_TIMEOUT);
1330
1331 asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id),
1332 phy->id_frm_tok->dma_handle);
1333
1334 asd_control_led(asd_ha, phy_id, 1);
1335
1336 return 0;
1337}
1338
1339int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1340{
1341 u8 phy_m;
1342 u8 i;
1343 int num = 0, k;
1344 struct asd_ascb *ascb;
1345 struct asd_ascb *ascb_list;
1346
1347 if (!phy_mask) {
1348 asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__);
1349 return 0;
1350 }
1351
1352 for_each_phy(phy_mask, phy_m, i) {
1353 num++;
1354 asd_enable_phy(asd_ha, i);
1355 }
1356
1357 k = num;
1358 ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL);
1359 if (!ascb_list) {
1360 asd_printk("no memory for control phy ascb list\n");
1361 return -ENOMEM;
1362 }
1363 num -= k;
1364
1365 ascb = ascb_list;
1366 for_each_phy(phy_mask, phy_m, i) {
1367 asd_build_control_phy(ascb, i, ENABLE_PHY);
1368 ascb = list_entry(ascb->list.next, struct asd_ascb, list);
1369 }
1370 ASD_DPRINTK("posting %d control phy scbs\n", num);
1371 k = asd_post_ascb_list(asd_ha, ascb_list, num);
1372 if (k)
1373 asd_ascb_free_list(ascb_list);
1374
1375 return k;
1376}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
new file mode 100644
index 000000000000..c7d505388fed
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -0,0 +1,397 @@
1/*
2 * Aic94xx SAS/SATA driver hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_HWI_H_
28#define _AIC94XX_HWI_H_
29
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33
34#include <scsi/libsas.h>
35
36#include "aic94xx.h"
37#include "aic94xx_sas.h"
38
39/* Define ASD_MAX_PHYS to the maximum number of phys ever. Currently 8. */
40#define ASD_MAX_PHYS 8
41#define ASD_PCBA_SN_SIZE 12
42
43/* These are to be named properly (the "RAZORx" part) and
44 * subsequently included in include/linux/pci_ids.h.
45 */
46#define PCI_DEVICE_ID_ADAPTEC2_RAZOR10 0x410
47#define PCI_DEVICE_ID_ADAPTEC2_RAZOR12 0x412
48#define PCI_DEVICE_ID_ADAPTEC2_RAZOR1E 0x41E
49#define PCI_DEVICE_ID_ADAPTEC2_RAZOR30 0x430
50#define PCI_DEVICE_ID_ADAPTEC2_RAZOR32 0x432
51#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3E 0x43E
52#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3F 0x43F
53
54struct asd_ha_addrspace {
55 void __iomem *addr;
56 unsigned long start; /* pci resource start */
57 unsigned long len; /* pci resource len */
58 unsigned long flags; /* pci resource flags */
59
60 /* addresses internal to the host adapter */
61 u32 swa_base; /* mmspace 1 (MBAR1) uses this only */
62 u32 swb_base;
63 u32 swc_base;
64};
65
66struct bios_struct {
67 int present;
68 u8 maj;
69 u8 min;
70 u32 bld;
71};
72
73struct unit_element_struct {
74 u16 num;
75 u16 size;
76 void *area;
77};
78
79struct flash_struct {
80 u32 bar;
81 int present;
82 int wide;
83 u8 manuf;
84 u8 dev_id;
85 u8 sec_prot;
86
87 u32 dir_offs;
88};
89
90struct asd_phy_desc {
91 /* From CTRL-A settings, then set to what is appropriate */
92 u8 sas_addr[SAS_ADDR_SIZE];
93 u8 max_sas_lrate;
94 u8 min_sas_lrate;
95 u8 max_sata_lrate;
96 u8 min_sata_lrate;
97 u8 flags;
98#define ASD_CRC_DIS 1
99#define ASD_SATA_SPINUP_HOLD 2
100
101 u8 phy_control_0; /* mode 5 reg 0x160 */
102 u8 phy_control_1; /* mode 5 reg 0x161 */
103 u8 phy_control_2; /* mode 5 reg 0x162 */
104 u8 phy_control_3; /* mode 5 reg 0x163 */
105};
106
107struct asd_dma_tok {
108 void *vaddr;
109 dma_addr_t dma_handle;
110 size_t size;
111};
112
113struct hw_profile {
114 struct bios_struct bios;
115 struct unit_element_struct ue;
116 struct flash_struct flash;
117
118 u8 sas_addr[SAS_ADDR_SIZE];
119 char pcba_sn[ASD_PCBA_SN_SIZE+1];
120
121 u8 enabled_phys; /* mask of enabled phys */
122 struct asd_phy_desc phy_desc[ASD_MAX_PHYS];
123 u32 max_scbs; /* absolute sequencer scb queue size */
124 struct asd_dma_tok *scb_ext;
125 u32 max_ddbs;
126 struct asd_dma_tok *ddb_ext;
127
128 spinlock_t ddb_lock;
129 void *ddb_bitmap;
130
131 int num_phys; /* ENABLEABLE */
132 int max_phys; /* REPORTED + ENABLEABLE */
133
134 unsigned addr_range; /* max # of addrs; max # of possible ports */
135 unsigned port_name_base;
136 unsigned dev_name_base;
137 unsigned sata_name_base;
138};
139
140struct asd_ascb {
141 struct list_head list;
142 struct asd_ha_struct *ha;
143
144 struct scb *scb; /* equals dma_scb->vaddr */
145 struct asd_dma_tok dma_scb;
146 struct asd_dma_tok *sg_arr;
147
148 void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *);
149 u8 uldd_timer:1;
150
151 /* internally generated command */
152 struct timer_list timer;
153 struct completion completion;
154 u8 tag_valid:1;
155 __be16 tag; /* error recovery only */
156
157 /* If this is an Empty SCB, index of first edb in seq->edb_arr. */
158 int edb_index;
159
160 /* Used by the timer timeout function. */
161 int tc_index;
162
163 void *uldd_task;
164};
165
166#define ASD_DL_SIZE_BITS 0x8
167#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS))
168#define ASD_DEF_DL_TOGGLE 0x01
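/* With the values above, ASD_DL_SIZE evaluates to 1 << (2 + 8) = 1024
 * done-list entries.
 */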
169
170struct asd_seq_data {
171 spinlock_t pend_q_lock;
172 u16 scbpro;
173 int pending;
174 struct list_head pend_q;
175 int can_queue; /* per adapter */
176 struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */
177
178 spinlock_t tc_index_lock;
179 void **tc_index_array;
180 void *tc_index_bitmap;
181 int tc_index_bitmap_bits;
182
183 struct tasklet_struct dl_tasklet;
184 struct done_list_struct *dl; /* array of done list entries, equals */
185 struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */
186 int dl_toggle;
187 int dl_next;
188
189 int num_edbs;
190 struct asd_dma_tok **edb_arr;
191 int num_escbs;
192 struct asd_ascb **escb_arr; /* array of pointers to escbs */
193};
194
195/* This is the Host Adapter structure. It describes the hardware
196 * SAS adapter.
197 */
198struct asd_ha_struct {
199 struct pci_dev *pcidev;
200 const char *name;
201
202 struct sas_ha_struct sas_ha;
203
204 u8 revision_id;
205
206 int iospace;
207 spinlock_t iolock;
208 struct asd_ha_addrspace io_handle[2];
209
210 struct hw_profile hw_prof;
211
212 struct asd_phy phys[ASD_MAX_PHYS];
213 struct asd_sas_port ports[ASD_MAX_PHYS];
214
215 struct dma_pool *scb_pool;
216
217 struct asd_seq_data seq; /* sequencer related */
218};
219
220/* ---------- Common macros ---------- */
221
222#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle))
223#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8) \
224 ? ((u32)((__dma_handle) >> 32)) \
225 : ((u32)0))
226
227#define dev_to_asd_ha(__dev) pci_get_drvdata(to_pci_dev(__dev))
228#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \
229 && ((__site_no) & 0xF0FF) > 0x001F)
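/* Reading SCB_SITE_VALID() literally: a site number is rejected when
 * (site_no & 0xF0FF) equals 0x00FF or does not exceed 0x001F; those
 * ranges are presumably reserved for internal use by the sequencers.
 */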
230/* For each bit set in __lseq_mask, set __lseq to equal the bit
231 * position of the set bit and execute the statement following.
232 * __mc is the temporary mask, used as a mask "counter".
233 */
234#define for_each_sequencer(__lseq_mask, __mc, __lseq) \
235 for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
236 if (((__mc) & 1))
237#define for_each_phy(__lseq_mask, __mc, __lseq) \
238 for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\
239 if (((__mc) & 1))
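/* Usage sketch for the two iterators above (illustrative only --
 * asd_do_something_with_phy() is a made-up name):
 *
 *	u8 mask;
 *	int i;
 *
 *	for_each_phy(0x05, mask, i)
 *		asd_do_something_with_phy(asd_ha, i);
 *
 * runs the body for i == 0 and i == 2, since bits 0 and 2 are set in
 * the mask 0x05.
 */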
240
241#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I)))
242
243/* ---------- DMA allocs ---------- */
244
245static inline struct asd_dma_tok *asd_dmatok_alloc(unsigned int flags)
246{
247 return kmem_cache_alloc(asd_dma_token_cache, flags);
248}
249
250static inline void asd_dmatok_free(struct asd_dma_tok *token)
251{
252 kmem_cache_free(asd_dma_token_cache, token);
253}
254
255static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct *
256 asd_ha, size_t size,
257 unsigned int flags)
258{
259 struct asd_dma_tok *token = asd_dmatok_alloc(flags);
260 if (token) {
261 token->size = size;
262 token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev,
263 token->size,
264 &token->dma_handle,
265 flags);
266 if (!token->vaddr) {
267 asd_dmatok_free(token);
268 token = NULL;
269 }
270 }
271 return token;
272}
273
274static inline void asd_free_coherent(struct asd_ha_struct *asd_ha,
275 struct asd_dma_tok *token)
276{
277 if (token) {
278 dma_free_coherent(&asd_ha->pcidev->dev, token->size,
279 token->vaddr, token->dma_handle);
280 asd_dmatok_free(token);
281 }
282}
283
284static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
285 struct asd_ascb *ascb)
286{
287 INIT_LIST_HEAD(&ascb->list);
288 ascb->scb = ascb->dma_scb.vaddr;
289 ascb->ha = asd_ha;
290 ascb->timer.function = NULL;
291 init_timer(&ascb->timer);
292 ascb->tc_index = -1;
293 init_completion(&ascb->completion);
294}
295
296/* Must be called with the tc_index_lock held!
297 */
298static inline void asd_tc_index_release(struct asd_seq_data *seq, int index)
299{
300 seq->tc_index_array[index] = NULL;
301 clear_bit(index, seq->tc_index_bitmap);
302}
303
304/* Must be called with the tc_index_lock held!
305 */
306static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr)
307{
308 int index;
309
310 index = find_first_zero_bit(seq->tc_index_bitmap,
311 seq->tc_index_bitmap_bits);
312 if (index == seq->tc_index_bitmap_bits)
313 return -1;
314
315 seq->tc_index_array[index] = ptr;
316 set_bit(index, seq->tc_index_bitmap);
317
318 return index;
319}
320
321/* Must be called with the tc_index_lock held!
322 */
323static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index)
324{
325 return seq->tc_index_array[index];
326}
327
328/**
329 * asd_ascb_free -- free a single aSCB after it has completed
330 * @ascb: pointer to the aSCB of interest
331 *
332 * This frees an aSCB after it has been executed/completed by
333 * the sequencer.
334 */
335static inline void asd_ascb_free(struct asd_ascb *ascb)
336{
337 if (ascb) {
338 struct asd_ha_struct *asd_ha = ascb->ha;
339 unsigned long flags;
340
341 BUG_ON(!list_empty(&ascb->list));
342 spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags);
343 asd_tc_index_release(&ascb->ha->seq, ascb->tc_index);
344 spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags);
345 dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr,
346 ascb->dma_scb.dma_handle);
347 kmem_cache_free(asd_ascb_cache, ascb);
348 }
349}
350
351/**
352 * asd_ascb_list_free -- free a list of ascbs
353 * @ascb_list: a list of ascbs
354 *
355 * This function will free a list of ascbs allocated by asd_ascb_alloc_list.
356 * It is used when, say, the SCB queueing function returned QUEUE_FULL,
357 * and we do not need the ascbs any more.
358 */
359static inline void asd_ascb_free_list(struct asd_ascb *ascb_list)
360{
361 LIST_HEAD(list);
362 struct list_head *n, *pos;
363
364 __list_add(&list, ascb_list->list.prev, &ascb_list->list);
365 list_for_each_safe(pos, n, &list) {
366 list_del_init(pos);
367 asd_ascb_free(list_entry(pos, struct asd_ascb, list));
368 }
369}
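/*
 * Editor's illustrative sketch (not part of the original patch), showing
 * the QUEUE_FULL scenario mentioned in the kernel-doc above; the caller
 * shown here is hypothetical, not driver code. If posting fails (for
 * example because the sequencer queue is full), the whole list is
 * returned with asd_ascb_free_list():
 *
 *	int num = 1;
 *	struct asd_ascb *ascb = asd_ascb_alloc_list(asd_ha, &num, GFP_KERNEL);
 *
 *	if (!ascb)
 *		return -ENOMEM;
 *	... fill in ascb->scb ...
 *	if (asd_post_ascb_list(asd_ha, ascb, num))
 *		asd_ascb_free_list(ascb);
 */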
370
371/* ---------- Function declarations ---------- */
372
373int asd_init_hw(struct asd_ha_struct *asd_ha);
374irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs);
375
376
377struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct
378 *asd_ha, int *num,
379 unsigned int gfp_mask);
380
381int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
382 int num);
383int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
384 int num);
385
386int asd_init_post_escbs(struct asd_ha_struct *asd_ha);
387void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc);
388void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
389void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
390int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask);
391void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
392 u8 subfunc);
393
394void asd_ascb_timedout(unsigned long data);
395int asd_chip_hardrst(struct asd_ha_struct *asd_ha);
396
397#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
new file mode 100644
index 000000000000..ee2ccad70487
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -0,0 +1,866 @@
1/*
2 * Aic94xx SAS/SATA driver initialization.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/pci.h>
32#include <linux/delay.h>
33
34#include <scsi/scsi_host.h>
35
36#include "aic94xx.h"
37#include "aic94xx_reg.h"
38#include "aic94xx_hwi.h"
39#include "aic94xx_seq.h"
40
41/* The format is "version.release.patchlevel" */
42#define ASD_DRIVER_VERSION "1.0.2"
43
44static int use_msi = 0;
45module_param_named(use_msi, use_msi, int, S_IRUGO);
46MODULE_PARM_DESC(use_msi, "\n"
47 "\tEnable(1) or disable(0) using PCI MSI.\n"
48 "\tDefault: 0");
49
50static int lldd_max_execute_num = 0;
51module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
52MODULE_PARM_DESC(collector, "\n"
53 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
54 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
55 "\tThe aic94xx SAS LLDD supports both modes.\n"
56 "\tDefault: 0 (Direct Mode).\n");
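/*
 * Editor's note (illustrative, not part of the original patch): both
 * parameters above are module parameters set at load time, e.g.
 *
 *	modprobe aic94xx use_msi=1 collector=32
 *
 * where the values are arbitrary examples.
 */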
57
58char sas_addr_str[2*SAS_ADDR_SIZE + 1] = "";
59
60static struct scsi_transport_template *aic94xx_transport_template;
61
62static struct scsi_host_template aic94xx_sht = {
63 .module = THIS_MODULE,
64 /* .name is initialized */
65 .name = "aic94xx",
66 .queuecommand = sas_queuecommand,
67 .target_alloc = sas_target_alloc,
68 .slave_configure = sas_slave_configure,
69 .slave_destroy = sas_slave_destroy,
70 .change_queue_depth = sas_change_queue_depth,
71 .change_queue_type = sas_change_queue_type,
72 .bios_param = sas_bios_param,
73 .can_queue = 1,
74 .cmd_per_lun = 1,
75 .this_id = -1,
76 .sg_tablesize = SG_ALL,
77 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
78 .use_clustering = ENABLE_CLUSTERING,
79};
80
81static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
82{
83 int err, i;
84 struct asd_ha_addrspace *io_handle;
85
86 asd_ha->iospace = 0;
87 for (i = 0; i < 3; i += 2) {
88 io_handle = &asd_ha->io_handle[i==0?0:1];
89 io_handle->start = pci_resource_start(asd_ha->pcidev, i);
90 io_handle->len = pci_resource_len(asd_ha->pcidev, i);
91 io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
92 err = -ENODEV;
93 if (!io_handle->start || !io_handle->len) {
94 asd_printk("MBAR%d start or length for %s is 0.\n",
95 i==0?0:1, pci_name(asd_ha->pcidev));
96 goto Err;
97 }
98 err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
99 if (err) {
100 asd_printk("couldn't reserve memory region for %s\n",
101 pci_name(asd_ha->pcidev));
102 goto Err;
103 }
104 if (io_handle->flags & IORESOURCE_CACHEABLE)
105 io_handle->addr = ioremap(io_handle->start,
106 io_handle->len);
107 else
108 io_handle->addr = ioremap_nocache(io_handle->start,
109 io_handle->len);
110 if (!io_handle->addr) {
111 asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
112 pci_name(asd_ha->pcidev));
113 goto Err_unreq;
114 }
115 }
116
117 return 0;
118Err_unreq:
119 pci_release_region(asd_ha->pcidev, i);
120Err:
121 if (i > 0) {
122 io_handle = &asd_ha->io_handle[0];
123 iounmap(io_handle->addr);
124 pci_release_region(asd_ha->pcidev, 0);
125 }
126 return err;
127}
128
129static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha)
130{
131 struct asd_ha_addrspace *io_handle;
132
133 io_handle = &asd_ha->io_handle[1];
134 iounmap(io_handle->addr);
135 pci_release_region(asd_ha->pcidev, 2);
136
137 io_handle = &asd_ha->io_handle[0];
138 iounmap(io_handle->addr);
139 pci_release_region(asd_ha->pcidev, 0);
140}
141
142static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
143{
144 int i = PCI_IOBAR_OFFSET, err;
145 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
146
147 asd_ha->iospace = 1;
148 io_handle->start = pci_resource_start(asd_ha->pcidev, i);
149 io_handle->len = pci_resource_len(asd_ha->pcidev, i);
150 io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
151 io_handle->addr = (void __iomem *) io_handle->start;
152 if (!io_handle->start || !io_handle->len) {
153 asd_printk("couldn't get IO ports for %s\n",
154 pci_name(asd_ha->pcidev));
155 return -ENODEV;
156 }
157 err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
158 if (err) {
159 asd_printk("couldn't reserve io space for %s\n",
160 pci_name(asd_ha->pcidev));
161 }
162
163 return err;
164}
165
166static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha)
167{
168 pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
169}
170
171static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha)
172{
173 int err;
174 u16 cmd_reg;
175
176 err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg);
177 if (err) {
178 asd_printk("couldn't read command register of %s\n",
179 pci_name(asd_ha->pcidev));
180 goto Err;
181 }
182
183 err = -ENODEV;
184 if (cmd_reg & PCI_COMMAND_MEMORY) {
185 if ((err = asd_map_memio(asd_ha)))
186 goto Err;
187 } else if (cmd_reg & PCI_COMMAND_IO) {
188 if ((err = asd_map_ioport(asd_ha)))
189 goto Err;
190 asd_printk("%s ioport mapped -- upgrade your hardware\n",
191 pci_name(asd_ha->pcidev));
192 } else {
193 asd_printk("no proper device access to %s\n",
194 pci_name(asd_ha->pcidev));
195 goto Err;
196 }
197
198 return 0;
199Err:
200 return err;
201}
202
203static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha)
204{
205 if (asd_ha->iospace)
206 asd_unmap_ioport(asd_ha);
207 else
208 asd_unmap_memio(asd_ha);
209}
210
211static const char *asd_dev_rev[30] = {
212 [0] = "A0",
213 [1] = "A1",
214 [8] = "B0",
215};
216
217static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
218{
219 int err, i;
220
221 err = pci_read_config_byte(asd_ha->pcidev, PCI_REVISION_ID,
222 &asd_ha->revision_id);
223 if (err) {
224 asd_printk("couldn't read REVISION ID register of %s\n",
225 pci_name(asd_ha->pcidev));
226 goto Err;
227 }
228 err = -ENODEV;
229 if (asd_ha->revision_id < AIC9410_DEV_REV_B0) {
230 asd_printk("%s is revision %s (%X), which is not supported\n",
231 pci_name(asd_ha->pcidev),
232 asd_dev_rev[asd_ha->revision_id],
233 asd_ha->revision_id);
234 goto Err;
235 }
236 /* Provide some sane default values. */
237 asd_ha->hw_prof.max_scbs = 512;
238 asd_ha->hw_prof.max_ddbs = 128;
239 asd_ha->hw_prof.num_phys = ASD_MAX_PHYS;
240 /* All phys are enabled, by default. */
241 asd_ha->hw_prof.enabled_phys = 0xFF;
242 for (i = 0; i < ASD_MAX_PHYS; i++) {
243 asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
244 SAS_LINK_RATE_3_0_GBPS;
245 asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
246 SAS_LINK_RATE_1_5_GBPS;
247 asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
248 SAS_LINK_RATE_1_5_GBPS;
249 asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
250 SAS_LINK_RATE_1_5_GBPS;
251 }
252
253 return 0;
254Err:
255 return err;
256}
257
258static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
259{
260 int err = asd_common_setup(asd_ha);
261
262 if (err)
263 return err;
264
265 asd_ha->hw_prof.addr_range = 8;
266 asd_ha->hw_prof.port_name_base = 0;
267 asd_ha->hw_prof.dev_name_base = 8;
268 asd_ha->hw_prof.sata_name_base = 16;
269
270 return 0;
271}
272
273static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha)
274{
275 int err = asd_common_setup(asd_ha);
276
277 if (err)
278 return err;
279
280 asd_ha->hw_prof.addr_range = 4;
281 asd_ha->hw_prof.port_name_base = 0;
282 asd_ha->hw_prof.dev_name_base = 4;
283 asd_ha->hw_prof.sata_name_base = 8;
284
285 return 0;
286}
287
288static ssize_t asd_show_dev_rev(struct device *dev,
289 struct device_attribute *attr, char *buf)
290{
291 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
292 return snprintf(buf, PAGE_SIZE, "%s\n",
293 asd_dev_rev[asd_ha->revision_id]);
294}
295static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
296
297static ssize_t asd_show_dev_bios_build(struct device *dev,
298 struct device_attribute *attr,char *buf)
299{
300 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
301 return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld);
302}
303static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL);
304
305static ssize_t asd_show_dev_pcba_sn(struct device *dev,
306 struct device_attribute *attr, char *buf)
307{
308 struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev);
309 return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn);
310}
311static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL);
312
313static void asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
314{
315 device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
316 device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
317 device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
318}
319
320static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
321{
322 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
323 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
324 device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
325}
326
327/* The first entry, 0, is used for dynamic ids, the rest for devices
328 * we know about.
329 */
330static struct asd_pcidev_struct {
331 const char * name;
332 int (*setup)(struct asd_ha_struct *asd_ha);
333} asd_pcidev_data[] = {
334 /* Id 0 is used for dynamic ids. */
335 { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
336 .setup = asd_aic9410_setup
337 },
338 { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter",
339 .setup = asd_aic9410_setup
340 },
341 { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter",
342 .setup = asd_aic9405_setup
343 },
344};
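/*
 * Editor's note (illustrative, not part of the original patch): the
 * driver_data value in each aic94xx_pci_table entry further down is used
 * by asd_pci_probe() as an index into asd_pcidev_data[] above; an entry
 * carrying 1 selects the AIC-9410W name and asd_aic9410_setup(), and 2
 * selects the AIC-9405W variant with asd_aic9405_setup().
 */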
345
346static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha)
347{
348 asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool",
349 &asd_ha->pcidev->dev,
350 sizeof(struct scb),
351 8, 0);
352 if (!asd_ha->scb_pool) {
353 asd_printk("couldn't create scb pool\n");
354 return -ENOMEM;
355 }
356
357 return 0;
358}
359
360/**
361 * asd_free_edbs -- free empty data buffers
362 * @asd_ha: pointer to host adapter structure
363 */
364static inline void asd_free_edbs(struct asd_ha_struct *asd_ha)
365{
366 struct asd_seq_data *seq = &asd_ha->seq;
367 int i;
368
369 for (i = 0; i < seq->num_edbs; i++)
370 asd_free_coherent(asd_ha, seq->edb_arr[i]);
371 kfree(seq->edb_arr);
372 seq->edb_arr = NULL;
373}
374
375static inline void asd_free_escbs(struct asd_ha_struct *asd_ha)
376{
377 struct asd_seq_data *seq = &asd_ha->seq;
378 int i;
379
380 for (i = 0; i < seq->num_escbs; i++) {
381 if (!list_empty(&seq->escb_arr[i]->list))
382 list_del_init(&seq->escb_arr[i]->list);
383
384 asd_ascb_free(seq->escb_arr[i]);
385 }
386 kfree(seq->escb_arr);
387 seq->escb_arr = NULL;
388}
389
390static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
391{
392 int i;
393
394 if (asd_ha->hw_prof.ddb_ext)
395 asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext);
396 if (asd_ha->hw_prof.scb_ext)
397 asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
398
399 if (asd_ha->hw_prof.ddb_bitmap)
400 kfree(asd_ha->hw_prof.ddb_bitmap);
401 asd_ha->hw_prof.ddb_bitmap = NULL;
402
403 for (i = 0; i < ASD_MAX_PHYS; i++) {
404 struct asd_phy *phy = &asd_ha->phys[i];
405
406 asd_free_coherent(asd_ha, phy->id_frm_tok);
407 }
408 if (asd_ha->seq.escb_arr)
409 asd_free_escbs(asd_ha);
410 if (asd_ha->seq.edb_arr)
411 asd_free_edbs(asd_ha);
412 if (asd_ha->hw_prof.ue.area) {
413 kfree(asd_ha->hw_prof.ue.area);
414 asd_ha->hw_prof.ue.area = NULL;
415 }
416 if (asd_ha->seq.tc_index_array) {
417 kfree(asd_ha->seq.tc_index_array);
418 kfree(asd_ha->seq.tc_index_bitmap);
419 asd_ha->seq.tc_index_array = NULL;
420 asd_ha->seq.tc_index_bitmap = NULL;
421 }
422 if (asd_ha->seq.actual_dl) {
423 asd_free_coherent(asd_ha, asd_ha->seq.actual_dl);
424 asd_ha->seq.actual_dl = NULL;
425 asd_ha->seq.dl = NULL;
426 }
427 if (asd_ha->seq.next_scb.vaddr) {
428 dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr,
429 asd_ha->seq.next_scb.dma_handle);
430 asd_ha->seq.next_scb.vaddr = NULL;
431 }
432 dma_pool_destroy(asd_ha->scb_pool);
433 asd_ha->scb_pool = NULL;
434}
435
436kmem_cache_t *asd_dma_token_cache;
437kmem_cache_t *asd_ascb_cache;
438
439static int asd_create_global_caches(void)
440{
441 if (!asd_dma_token_cache) {
442 asd_dma_token_cache
443 = kmem_cache_create(ASD_DRIVER_NAME "_dma_token",
444 sizeof(struct asd_dma_tok),
445 0,
446 SLAB_HWCACHE_ALIGN,
447 NULL, NULL);
448 if (!asd_dma_token_cache) {
449 asd_printk("couldn't create dma token cache\n");
450 return -ENOMEM;
451 }
452 }
453
454 if (!asd_ascb_cache) {
455 asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb",
456 sizeof(struct asd_ascb),
457 0,
458 SLAB_HWCACHE_ALIGN,
459 NULL, NULL);
460 if (!asd_ascb_cache) {
461 asd_printk("couldn't create ascb cache\n");
462 goto Err;
463 }
464 }
465
466 return 0;
467Err:
468 kmem_cache_destroy(asd_dma_token_cache);
469 asd_dma_token_cache = NULL;
470 return -ENOMEM;
471}
472
473static void asd_destroy_global_caches(void)
474{
475 if (asd_dma_token_cache)
476 kmem_cache_destroy(asd_dma_token_cache);
477 asd_dma_token_cache = NULL;
478
479 if (asd_ascb_cache)
480 kmem_cache_destroy(asd_ascb_cache);
481 asd_ascb_cache = NULL;
482}
483
484static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
485{
486 int i;
487 struct asd_sas_phy **sas_phys =
488 kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_phy), GFP_KERNEL);
489 struct asd_sas_port **sas_ports =
490 kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_port), GFP_KERNEL);
491
492 if (!sas_phys || !sas_ports) {
493 kfree(sas_phys);
494 kfree(sas_ports);
495 return -ENOMEM;
496 }
497
498 asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
499 asd_ha->sas_ha.lldd_module = THIS_MODULE;
500 asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
501
502 for (i = 0; i < ASD_MAX_PHYS; i++) {
503 sas_phys[i] = &asd_ha->phys[i].sas_phy;
504 sas_ports[i] = &asd_ha->ports[i];
505 }
506
507 asd_ha->sas_ha.sas_phy = sas_phys;
508 asd_ha->sas_ha.sas_port= sas_ports;
509 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
510
511 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
512
513 return sas_register_ha(&asd_ha->sas_ha);
514}
515
516static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
517{
518 int err;
519
520 err = sas_unregister_ha(&asd_ha->sas_ha);
521
522 sas_remove_host(asd_ha->sas_ha.core.shost);
523 scsi_remove_host(asd_ha->sas_ha.core.shost);
524 scsi_host_put(asd_ha->sas_ha.core.shost);
525
526 kfree(asd_ha->sas_ha.sas_phy);
527 kfree(asd_ha->sas_ha.sas_port);
528
529 return err;
530}
531
532static int __devinit asd_pci_probe(struct pci_dev *dev,
533 const struct pci_device_id *id)
534{
535 struct asd_pcidev_struct *asd_dev;
536 unsigned asd_id = (unsigned) id->driver_data;
537 struct asd_ha_struct *asd_ha;
538 struct Scsi_Host *shost;
539 int err;
540
541 if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) {
542 asd_printk("wrong driver_data in PCI table\n");
543 return -ENODEV;
544 }
545
546 if ((err = pci_enable_device(dev))) {
547 asd_printk("couldn't enable device %s\n", pci_name(dev));
548 return err;
549 }
550
551 pci_set_master(dev);
552
553 err = -ENOMEM;
554
555 shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *));
556 if (!shost)
557 goto Err;
558
559 asd_dev = &asd_pcidev_data[asd_id];
560
561 asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL);
562 if (!asd_ha) {
563 asd_printk("out of memory\n");
564 goto Err;
565 }
566 asd_ha->pcidev = dev;
567 asd_ha->sas_ha.pcidev = asd_ha->pcidev;
568 asd_ha->sas_ha.lldd_ha = asd_ha;
569
570 asd_ha->name = asd_dev->name;
571 asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
572
573 SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
574 asd_ha->sas_ha.core.shost = shost;
575 shost->transportt = aic94xx_transport_template;
576 shost->max_id = ~0;
577 shost->max_lun = ~0;
578 shost->max_cmd_len = 16;
579
580 err = scsi_add_host(shost, &dev->dev);
581 if (err) {
582 scsi_host_put(shost);
583 goto Err_free;
584 }
585
586
587
588 err = asd_dev->setup(asd_ha);
589 if (err)
590 goto Err_free;
591
592 err = -ENODEV;
593 if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)
594 && !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK))
595 ;
596 else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)
597 && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK))
598 ;
599 else {
600 asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
601 goto Err_free;
602 }
603
604 pci_set_drvdata(dev, asd_ha);
605
606 err = asd_map_ha(asd_ha);
607 if (err)
608 goto Err_free;
609
610 err = asd_create_ha_caches(asd_ha);
611 if (err)
612 goto Err_unmap;
613
614 err = asd_init_hw(asd_ha);
615 if (err)
616 goto Err_free_cache;
617
618 asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled "
619 "phys, flash %s, BIOS %s%d\n",
620 pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr),
621 asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys,
622 asd_ha->hw_prof.num_phys,
623 asd_ha->hw_prof.flash.present ? "present" : "not present",
624 asd_ha->hw_prof.bios.present ? "build " : "not present",
625 asd_ha->hw_prof.bios.bld);
626
627 shost->can_queue = asd_ha->seq.can_queue;
628
629 if (use_msi)
630 pci_enable_msi(asd_ha->pcidev);
631
632 err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, SA_SHIRQ,
633 ASD_DRIVER_NAME, asd_ha);
634 if (err) {
635 asd_printk("couldn't get irq %d for %s\n",
636 asd_ha->pcidev->irq, pci_name(asd_ha->pcidev));
637 goto Err_irq;
638 }
639 asd_enable_ints(asd_ha);
640
641 err = asd_init_post_escbs(asd_ha);
642 if (err) {
643 asd_printk("couldn't post escbs for %s\n",
644 pci_name(asd_ha->pcidev));
645 goto Err_escbs;
646 }
647 ASD_DPRINTK("escbs posted\n");
648
649 asd_create_dev_attrs(asd_ha);
650
651 err = asd_register_sas_ha(asd_ha);
652 if (err)
653 goto Err_reg_sas;
654
655 err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys);
656 if (err) {
657		asd_printk("couldn't enable phys, err:%d\n", err);
658 goto Err_en_phys;
659 }
660 ASD_DPRINTK("enabled phys\n");
661 /* give the phy enabling interrupt event time to come in (1s
662 * is empirically about all it takes) */
663 ssleep(1);
664 /* Wait for discovery to finish */
665 scsi_flush_work(asd_ha->sas_ha.core.shost);
666
667 return 0;
668Err_en_phys:
669 asd_unregister_sas_ha(asd_ha);
670Err_reg_sas:
671 asd_remove_dev_attrs(asd_ha);
672Err_escbs:
673 asd_disable_ints(asd_ha);
674 free_irq(dev->irq, asd_ha);
675Err_irq:
676 if (use_msi)
677 pci_disable_msi(dev);
678 asd_chip_hardrst(asd_ha);
679Err_free_cache:
680 asd_destroy_ha_caches(asd_ha);
681Err_unmap:
682 asd_unmap_ha(asd_ha);
683Err_free:
684 kfree(asd_ha);
685 scsi_remove_host(shost);
686Err:
687 pci_disable_device(dev);
688 return err;
689}
690
691static void asd_free_queues(struct asd_ha_struct *asd_ha)
692{
693 unsigned long flags;
694 LIST_HEAD(pending);
695 struct list_head *n, *pos;
696
697 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
698 asd_ha->seq.pending = 0;
699 list_splice_init(&asd_ha->seq.pend_q, &pending);
700 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
701
702 if (!list_empty(&pending))
703 ASD_DPRINTK("Uh-oh! Pending is not empty!\n");
704
705 list_for_each_safe(pos, n, &pending) {
706 struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
707 list_del_init(pos);
708 ASD_DPRINTK("freeing from pending\n");
709 asd_ascb_free(ascb);
710 }
711}
712
713static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
714{
715 u8 phy_mask = asd_ha->hw_prof.enabled_phys;
716 u8 i;
717
718 for_each_phy(phy_mask, phy_mask, i) {
719 asd_turn_led(asd_ha, i, 0);
720 asd_control_led(asd_ha, i, 0);
721 }
722}
723
724static void __devexit asd_pci_remove(struct pci_dev *dev)
725{
726 struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
727
728 if (!asd_ha)
729 return;
730
731 asd_unregister_sas_ha(asd_ha);
732
733 asd_disable_ints(asd_ha);
734
735 asd_remove_dev_attrs(asd_ha);
736
737 /* XXX more here as needed */
738
739 free_irq(dev->irq, asd_ha);
740 if (use_msi)
741 pci_disable_msi(asd_ha->pcidev);
742 asd_turn_off_leds(asd_ha);
743 asd_chip_hardrst(asd_ha);
744 asd_free_queues(asd_ha);
745 asd_destroy_ha_caches(asd_ha);
746 asd_unmap_ha(asd_ha);
747 kfree(asd_ha);
748 pci_disable_device(dev);
749 return;
750}
751
752static ssize_t asd_version_show(struct device_driver *driver, char *buf)
753{
754 return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION);
755}
756static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL);
757
758static void asd_create_driver_attrs(struct device_driver *driver)
759{
760 driver_create_file(driver, &driver_attr_version);
761}
762
763static void asd_remove_driver_attrs(struct device_driver *driver)
764{
765 driver_remove_file(driver, &driver_attr_version);
766}
767
768static struct sas_domain_function_template aic94xx_transport_functions = {
769 .lldd_port_formed = asd_update_port_links,
770
771 .lldd_dev_found = asd_dev_found,
772 .lldd_dev_gone = asd_dev_gone,
773
774 .lldd_execute_task = asd_execute_task,
775
776 .lldd_abort_task = asd_abort_task,
777 .lldd_abort_task_set = asd_abort_task_set,
778 .lldd_clear_aca = asd_clear_aca,
779 .lldd_clear_task_set = asd_clear_task_set,
780 .lldd_I_T_nexus_reset = NULL,
781 .lldd_lu_reset = asd_lu_reset,
782 .lldd_query_task = asd_query_task,
783
784 .lldd_clear_nexus_port = asd_clear_nexus_port,
785 .lldd_clear_nexus_ha = asd_clear_nexus_ha,
786
787 .lldd_control_phy = asd_control_phy,
788};
789
790static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
791 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR10),
792 0, 0, 1},
793 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR12),
794 0, 0, 1},
795 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1E),
796 0, 0, 1},
797 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR30),
798 0, 0, 2},
799 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR32),
800 0, 0, 2},
801 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3E),
802 0, 0, 2},
803 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3F),
804 0, 0, 2},
805 {}
806};
807
808MODULE_DEVICE_TABLE(pci, aic94xx_pci_table);
809
810static struct pci_driver aic94xx_pci_driver = {
811 .name = ASD_DRIVER_NAME,
812 .id_table = aic94xx_pci_table,
813 .probe = asd_pci_probe,
814 .remove = __devexit_p(asd_pci_remove),
815};
816
817static int __init aic94xx_init(void)
818{
819 int err;
820
821
822 asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION,
823 ASD_DRIVER_VERSION);
824
825 err = asd_create_global_caches();
826 if (err)
827 return err;
828
829 aic94xx_transport_template =
830 sas_domain_attach_transport(&aic94xx_transport_functions);
831 if (!aic94xx_transport_template)
832 goto out_destroy_caches;
833
834 err = pci_register_driver(&aic94xx_pci_driver);
835 if (err)
836 goto out_release_transport;
837
838 asd_create_driver_attrs(&aic94xx_pci_driver.driver);
839
840 return err;
841
842 out_release_transport:
843 sas_release_transport(aic94xx_transport_template);
844 out_destroy_caches:
845 asd_destroy_global_caches();
846
847 return err;
848}
849
850static void __exit aic94xx_exit(void)
851{
852 asd_remove_driver_attrs(&aic94xx_pci_driver.driver);
853 pci_unregister_driver(&aic94xx_pci_driver);
854 sas_release_transport(aic94xx_transport_template);
855 asd_destroy_global_caches();
856 asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION,
857 ASD_DRIVER_VERSION);
858}
859
860module_init(aic94xx_init);
861module_exit(aic94xx_exit);
862
863MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
864MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION);
865MODULE_LICENSE("GPL v2");
866MODULE_VERSION(ASD_DRIVER_VERSION);
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c
new file mode 100644
index 000000000000..f210dac3203d
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.c
@@ -0,0 +1,332 @@
1/*
2 * Aic94xx SAS/SATA driver register access.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28#include "aic94xx_reg.h"
29#include "aic94xx.h"
30
31/* Writing to device address space.
32 * Offset comes before value as a reminder that the operation of
33 * these functions is *offs = val.
34 */
35static inline void asd_write_byte(struct asd_ha_struct *asd_ha,
36 unsigned long offs, u8 val)
37{
38 if (unlikely(asd_ha->iospace))
39 outb(val,
40 (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
41 else
42 writeb(val, asd_ha->io_handle[0].addr + offs);
43 wmb();
44}
45
46static inline void asd_write_word(struct asd_ha_struct *asd_ha,
47 unsigned long offs, u16 val)
48{
49 if (unlikely(asd_ha->iospace))
50 outw(val,
51 (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
52 else
53 writew(val, asd_ha->io_handle[0].addr + offs);
54 wmb();
55}
56
57static inline void asd_write_dword(struct asd_ha_struct *asd_ha,
58 unsigned long offs, u32 val)
59{
60 if (unlikely(asd_ha->iospace))
61 outl(val,
62 (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF));
63 else
64 writel(val, asd_ha->io_handle[0].addr + offs);
65 wmb();
66}
67
68/* Reading from device address space.
69 */
70static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha,
71 unsigned long offs)
72{
73 u8 val;
74 if (unlikely(asd_ha->iospace))
75 val = inb((unsigned long) asd_ha->io_handle[0].addr
76 + (offs & 0xFF));
77 else
78 val = readb(asd_ha->io_handle[0].addr + offs);
79 rmb();
80 return val;
81}
82
83static inline u16 asd_read_word(struct asd_ha_struct *asd_ha,
84 unsigned long offs)
85{
86 u16 val;
87 if (unlikely(asd_ha->iospace))
88 val = inw((unsigned long)asd_ha->io_handle[0].addr
89 + (offs & 0xFF));
90 else
91 val = readw(asd_ha->io_handle[0].addr + offs);
92 rmb();
93 return val;
94}
95
96static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha,
97 unsigned long offs)
98{
99 u32 val;
100 if (unlikely(asd_ha->iospace))
101 val = inl((unsigned long) asd_ha->io_handle[0].addr
102 + (offs & 0xFF));
103 else
104 val = readl(asd_ha->io_handle[0].addr + offs);
105 rmb();
106 return val;
107}
108
109static inline u32 asd_mem_offs_swa(void)
110{
111 return 0;
112}
113
114static inline u32 asd_mem_offs_swc(void)
115{
116 return asd_mem_offs_swa() + MBAR0_SWA_SIZE;
117}
118
119static inline u32 asd_mem_offs_swb(void)
120{
121 return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20;
122}
123
124/* We know that the desired register is within the range
125 * of the sliding window.
126 */
127#define ASD_READ_SW(ww, type, ord) \
128static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\
129 u32 reg) \
130{ \
131 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
132 u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
133 return asd_read_##ord (asd_ha, (unsigned long) map_offs); \
134}
135
136#define ASD_WRITE_SW(ww, type, ord) \
137static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\
138 u32 reg, type val) \
139{ \
140 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \
141 u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\
142 asd_write_##ord (asd_ha, (unsigned long) map_offs, val); \
143}
144
145ASD_READ_SW(swa, u8, byte);
146ASD_READ_SW(swa, u16, word);
147ASD_READ_SW(swa, u32, dword);
148
149ASD_READ_SW(swb, u8, byte);
150ASD_READ_SW(swb, u16, word);
151ASD_READ_SW(swb, u32, dword);
152
153ASD_READ_SW(swc, u8, byte);
154ASD_READ_SW(swc, u16, word);
155ASD_READ_SW(swc, u32, dword);
156
157ASD_WRITE_SW(swa, u8, byte);
158ASD_WRITE_SW(swa, u16, word);
159ASD_WRITE_SW(swa, u32, dword);
160
161ASD_WRITE_SW(swb, u8, byte);
162ASD_WRITE_SW(swb, u16, word);
163ASD_WRITE_SW(swb, u32, dword);
164
165ASD_WRITE_SW(swc, u8, byte);
166ASD_WRITE_SW(swc, u16, word);
167ASD_WRITE_SW(swc, u32, dword);
168
169/*
170 * A word about sliding windows:
171 * MBAR0 is divided into sliding windows A, C and B, in that order.
172 * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes.
173 * SWC starts at offset 0x58 of MBAR0, up to 0x5F, with size 0x8 bytes.
174 * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F.
175 * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0.
176 * See asd_init_sw() in aic94xx_hwi.c
177 *
178 * We map the most commonly accessed registers of the internal 4GB
179 * host adapter memory space. If a register/internal memory location
180 * that is not currently mapped is wanted, we slide SWB by paging it;
181 * see asd_move_swb() in aic94xx_reg.c.
182 */
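/*
 * Editor's illustrative walk-through (not part of the original patch),
 * assuming swa_base == REG_BASE_ADDR (set up in asd_init_sw(), not shown
 * here) and a register that SWB does not currently cover:
 *
 *	asd_write_reg_dword(asd_ha, COMSTATEN, val);
 *		COMSTATEN is REG_BASE_ADDR + 0x08, which falls inside SWA,
 *		so this is a plain MMIO write at MBAR0 offset 0x08.
 *
 *	asd_write_reg_dword(asd_ha, DCHSTATUS, val);
 *		DCHSTATUS is REG_BASE_ADDR + 0x81C, which misses SWA and
 *		SWC, so asd_move_swb() first pages SWB over that region via
 *		PCI_CONF_MBAR0_SWB and the write then goes out at MBAR0
 *		offset 0x80 + (reg - swb_base).
 */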
183
184/**
185 * asd_move_swb -- move sliding window B
186 * @asd_ha: pointer to host adapter structure
187 * @reg: register desired to be within range of the new window
188 */
189static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg)
190{
191 u32 base = reg & ~(MBAR0_SWB_SIZE-1);
192 pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base);
193 asd_ha->io_handle[0].swb_base = base;
194}
195
196static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val)
197{
198 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
199 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
200 if (io_handle->swa_base <= reg
201 && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
202 asd_write_swa_byte (asd_ha, reg,val);
203 else if (io_handle->swb_base <= reg
204 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
205 asd_write_swb_byte (asd_ha, reg, val);
206 else if (io_handle->swc_base <= reg
207 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
208 asd_write_swc_byte (asd_ha, reg, val);
209 else {
210 /* Ok, we have to move SWB */
211 asd_move_swb(asd_ha, reg);
212 asd_write_swb_byte (asd_ha, reg, val);
213 }
214}
215
216#define ASD_WRITE_REG(type, ord) \
217void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\
218{ \
219 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
220 unsigned long flags; \
221 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
222 spin_lock_irqsave(&asd_ha->iolock, flags); \
223 if (io_handle->swa_base <= reg \
224 && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
225 asd_write_swa_##ord (asd_ha, reg,val); \
226 else if (io_handle->swb_base <= reg \
227 && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
228 asd_write_swb_##ord (asd_ha, reg, val); \
229 else if (io_handle->swc_base <= reg \
230 && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
231 asd_write_swc_##ord (asd_ha, reg, val); \
232 else { \
233 /* Ok, we have to move SWB */ \
234 asd_move_swb(asd_ha, reg); \
235 asd_write_swb_##ord (asd_ha, reg, val); \
236 } \
237 spin_unlock_irqrestore(&asd_ha->iolock, flags); \
238}
239
240ASD_WRITE_REG(u8, byte);
241ASD_WRITE_REG(u16,word);
242ASD_WRITE_REG(u32,dword);
243
244static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg)
245{
246 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0];
247 u8 val;
248 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR);
249 if (io_handle->swa_base <= reg
250 && reg < io_handle->swa_base + MBAR0_SWA_SIZE)
251 val = asd_read_swa_byte (asd_ha, reg);
252 else if (io_handle->swb_base <= reg
253 && reg < io_handle->swb_base + MBAR0_SWB_SIZE)
254 val = asd_read_swb_byte (asd_ha, reg);
255 else if (io_handle->swc_base <= reg
256 && reg < io_handle->swc_base + MBAR0_SWC_SIZE)
257 val = asd_read_swc_byte (asd_ha, reg);
258 else {
259 /* Ok, we have to move SWB */
260 asd_move_swb(asd_ha, reg);
261 val = asd_read_swb_byte (asd_ha, reg);
262 }
263 return val;
264}
265
266#define ASD_READ_REG(type, ord) \
267type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \
268{ \
269 struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \
270 type val; \
271 unsigned long flags; \
272 BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \
273 spin_lock_irqsave(&asd_ha->iolock, flags); \
274 if (io_handle->swa_base <= reg \
275 && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \
276 val = asd_read_swa_##ord (asd_ha, reg); \
277 else if (io_handle->swb_base <= reg \
278 && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \
279 val = asd_read_swb_##ord (asd_ha, reg); \
280 else if (io_handle->swc_base <= reg \
281 && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \
282 val = asd_read_swc_##ord (asd_ha, reg); \
283 else { \
284 /* Ok, we have to move SWB */ \
285 asd_move_swb(asd_ha, reg); \
286 val = asd_read_swb_##ord (asd_ha, reg); \
287 } \
288 spin_unlock_irqrestore(&asd_ha->iolock, flags); \
289 return val; \
290}
291
292ASD_READ_REG(u8, byte);
293ASD_READ_REG(u16,word);
294ASD_READ_REG(u32,dword);
295
296/**
297 * asd_read_reg_string -- read a string of bytes from io space memory
298 * @asd_ha: pointer to host adapter structure
299 * @dst: pointer to a destination buffer where data will be written
300 * @offs: start offset (register) to read from
301 * @count: number of bytes to read
302 */
303void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
304 u32 offs, int count)
305{
306 u8 *p = dst;
307 unsigned long flags;
308
309 spin_lock_irqsave(&asd_ha->iolock, flags);
310 for ( ; count > 0; count--, offs++, p++)
311 *p = __asd_read_reg_byte(asd_ha, offs);
312 spin_unlock_irqrestore(&asd_ha->iolock, flags);
313}
314
315/**
316 * asd_write_reg_string -- write a string of bytes to io space memory
317 * @asd_ha: pointer to host adapter structure
318 * @src: pointer to source buffer where data will be read from
319 * @offs: start offset (register) to write to
320 * @count: number of bytes to write
321 */
322void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
323 u32 offs, int count)
324{
325 u8 *p = src;
326 unsigned long flags;
327
328 spin_lock_irqsave(&asd_ha->iolock, flags);
329 for ( ; count > 0; count--, offs++, p++)
330 __asd_write_reg_byte(asd_ha, offs, *p);
331 spin_unlock_irqrestore(&asd_ha->iolock, flags);
332}
diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h
new file mode 100644
index 000000000000..2279307fd27e
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg.h
@@ -0,0 +1,302 @@
1/*
2 * Aic94xx SAS/SATA driver hardware register definitions.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_REG_H_
28#define _AIC94XX_REG_H_
29
30#include <asm/io.h>
31#include "aic94xx_hwi.h"
32
33/* Values */
34#define AIC9410_DEV_REV_B0 0x8
35
36/* MBAR0, SWA, SWB, SWC, internal memory space addresses */
37#define REG_BASE_ADDR 0xB8000000
38#define REG_BASE_ADDR_CSEQCIO 0xB8002000
39#define REG_BASE_ADDR_EXSI 0xB8042800
40
41#define MBAR0_SWA_SIZE 0x58
42extern u32 MBAR0_SWB_SIZE;
43#define MBAR0_SWC_SIZE 0x8
44
45/* MBAR1, points to On Chip Memory */
46#define OCM_BASE_ADDR 0xA0000000
47#define OCM_MAX_SIZE 0x20000
48
49/* Smallest address possible to reference */
50#define ALL_BASE_ADDR OCM_BASE_ADDR
51
52/* PCI configuration space registers */
53#define PCI_IOBAR_OFFSET 4
54
55#define PCI_CONF_MBAR1 0x6C
56#define PCI_CONF_MBAR0_SWA 0x70
57#define PCI_CONF_MBAR0_SWB 0x74
58#define PCI_CONF_MBAR0_SWC 0x78
59#define PCI_CONF_MBAR_KEY 0x7C
60#define PCI_CONF_FLSH_BAR 0xB8
61
62#include "aic94xx_reg_def.h"
63
64u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg);
65u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg);
66u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg);
67
68void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val);
69void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val);
70void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val);
71
72void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst,
73 u32 offs, int count);
74void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src,
75 u32 offs, int count);
76
77#define ASD_READ_OCM(type, ord, S) \
78static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \
79 u32 offs) \
80{ \
81 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
82 type val = read##S (io_handle->addr + (unsigned long) offs); \
83 rmb(); \
84 return val; \
85}
86
87ASD_READ_OCM(u8, byte, b);
88ASD_READ_OCM(u16,word, w);
89ASD_READ_OCM(u32,dword,l);
90
91#define ASD_WRITE_OCM(type, ord, S) \
92static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \
93 u32 offs, type val) \
94{ \
95 struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \
96 write##S (val, io_handle->addr + (unsigned long) offs); \
97 return; \
98}
99
100ASD_WRITE_OCM(u8, byte, b);
101ASD_WRITE_OCM(u16,word, w);
102ASD_WRITE_OCM(u32,dword,l);
103
104#define ASD_DDBSITE_READ(type, ord) \
105static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \
106 u16 ddb_site_no, \
107 u16 offs) \
108{ \
109 asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
110 asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
111 return asd_read_reg_##ord (asd_ha, CTXACCESS); \
112}
113
114ASD_DDBSITE_READ(u32, dword);
115ASD_DDBSITE_READ(u16, word);
116
117static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha,
118 u16 ddb_site_no,
119 u16 offs)
120{
121 if (offs & 1)
122 return asd_ddbsite_read_word(asd_ha, ddb_site_no,
123 offs & ~1) >> 8;
124 else
125 return asd_ddbsite_read_word(asd_ha, ddb_site_no,
126 offs) & 0xFF;
127}
128
129
130#define ASD_DDBSITE_WRITE(type, ord) \
131static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \
132 u16 ddb_site_no, \
133 u16 offs, type val) \
134{ \
135 asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \
136 asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \
137 asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
138}
139
140ASD_DDBSITE_WRITE(u32, dword);
141ASD_DDBSITE_WRITE(u16, word);
142
143static inline void asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha,
144 u16 ddb_site_no,
145 u16 offs, u8 val)
146{
147 u16 base = offs & ~1;
148 u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
149 if (offs & 1)
150 rval = (val << 8) | (rval & 0xFF);
151 else
152 rval = (rval & 0xFF00) | val;
153 asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval);
154}
155
156
157#define ASD_SCBSITE_READ(type, ord) \
158static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \
159 u16 scb_site_no, \
160 u16 offs) \
161{ \
162 asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
163 asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
164 return asd_read_reg_##ord (asd_ha, CTXACCESS); \
165}
166
167ASD_SCBSITE_READ(u32, dword);
168ASD_SCBSITE_READ(u16, word);
169
170static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha,
171 u16 scb_site_no,
172 u16 offs)
173{
174 if (offs & 1)
175 return asd_scbsite_read_word(asd_ha, scb_site_no,
176 offs & ~1) >> 8;
177 else
178 return asd_scbsite_read_word(asd_ha, scb_site_no,
179 offs) & 0xFF;
180}
181
182
183#define ASD_SCBSITE_WRITE(type, ord) \
184static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \
185 u16 scb_site_no, \
186 u16 offs, type val) \
187{ \
188 asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \
189 asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \
190 asd_write_reg_##ord (asd_ha, CTXACCESS, val); \
191}
192
193ASD_SCBSITE_WRITE(u32, dword);
194ASD_SCBSITE_WRITE(u16, word);
195
196static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha,
197 u16 scb_site_no,
198 u16 offs, u8 val)
199{
200 u16 base = offs & ~1;
201 u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base);
202 if (offs & 1)
203 rval = (val << 8) | (rval & 0xFF);
204 else
205 rval = (rval & 0xFF00) | val;
206 asd_scbsite_write_word(asd_ha, scb_site_no, base, rval);
207}
208
209/**
210 * asd_ddbsite_update_word -- atomically update a word in a ddb site
211 * @asd_ha: pointer to host adapter structure
212 * @ddb_site_no: the DDB site number
213 * @offs: the offset into the DDB
214 * @oldval: old value found at that offset
215 * @newval: the new value to replace it
216 *
217 * This function is used when the sequencers are running and we need to
218 * update a DDB site atomically without expensive pausing and unpausing
219 * of the sequencers and accessing the DDB site through the CIO bus.
220 *
221 * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value
222 * is different from the current value at that offset.
223 */
224static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha,
225 u16 ddb_site_no, u16 offs,
226 u16 oldval, u16 newval)
227{
228 u8 done;
229 u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs);
230 if (oval != oldval)
231 return -EAGAIN;
232 asd_write_reg_word(asd_ha, AOLDDATA, oldval);
233 asd_write_reg_word(asd_ha, ANEWDATA, newval);
234 do {
235 done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL);
236 } while (!(done & ATOMICDONE));
237 if (done & ATOMICERR)
238 return -EFAULT; /* parity error */
239 else if (done & ATOMICWIN)
240 return 0; /* success */
241 else
242 return -EAGAIN; /* oldval different than current value */
243}
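/*
 * Editor's illustrative sketch (not part of the original patch): given
 * the return codes documented above, a caller would typically retry only
 * on -EAGAIN and treat -EFAULT (parity error) as fatal. SOME_FLAG below
 * is a made-up bit used purely for illustration:
 *
 *	int res;
 *	u16 old, new;
 *
 *	do {
 *		old = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs);
 *		new = old | SOME_FLAG;
 *		res = asd_ddbsite_update_word(asd_ha, ddb_site_no, offs,
 *					      old, new);
 *	} while (res == -EAGAIN);
 */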
244
245static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha,
246 u16 ddb_site_no, u16 offs,
247 u8 _oldval, u8 _newval)
248{
249 u16 base = offs & ~1;
250 u16 oval;
251 u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base);
252 if (offs & 1) {
253 if ((nval >> 8) != _oldval)
254 return -EAGAIN;
255 nval = (_newval << 8) | (nval & 0xFF);
256 oval = (_oldval << 8) | (nval & 0xFF);
257 } else {
258 if ((nval & 0xFF) != _oldval)
259 return -EAGAIN;
260 nval = (nval & 0xFF00) | _newval;
261 oval = (nval & 0xFF00) | _oldval;
262 }
263 return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval);
264}
265
266static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg,
267 dma_addr_t dma_handle)
268{
269 asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle));
270 asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle));
271}
272
273static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha)
274{
275 /* DCHREVISION returns 0, possibly broken */
276 u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
277 return ctxmemsize ? 65536 : 32768;
278}
279
280static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha)
281{
282 u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE;
283 return ctxmemsize ? 8192 : 4096;
284}
285
286static inline void asd_disable_ints(struct asd_ha_struct *asd_ha)
287{
288 asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
289}
290
291static inline void asd_enable_ints(struct asd_ha_struct *asd_ha)
292{
293 /* Enable COM SAS interrupt on errors, COMSTAT */
294 asd_write_reg_dword(asd_ha, COMSTATEN,
295 EN_CSBUFPERR | EN_CSERR | EN_OVLYERR);
296 /* Enable DCH SAS CFIFTOERR */
297 asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR);
298 /* Enable Host Device interrupts */
299 asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN);
300}
301
302#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
new file mode 100644
index 000000000000..b79f45f3ad47
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -0,0 +1,2398 @@
1/*
2 * Aic94xx SAS/SATA driver hardware register definitions.
3 *
4 * Copyright (C) 2004 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com>
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * Luben Tuikov: Some register value updates to make it work with the window
9 * agnostic register r/w functions. Some register corrections, sizes,
10 * etc.
11 *
12 * This file is licensed under GPLv2.
13 *
14 * This file is part of the aic94xx driver.
15 *
16 * The aic94xx driver is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; version 2 of the
19 * License.
20 *
21 * The aic94xx driver is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with the aic94xx driver; if not, write to the Free Software
28 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 *
30 * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $
31 *
32 */
33
34#ifndef _ADP94XX_REG_DEF_H_
35#define _ADP94XX_REG_DEF_H_
36
37/*
38 * Common definitions.
39 */
40#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */
41#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */
42#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */
43
44/********************* COM_SAS registers definition *************************/
45
46/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h.
47 */
48
49/*
50 * CHIM Registers, Address Range : (0x00-0xFF)
51 */
52#define COMBIST (REG_BASE_ADDR + 0x00)
53
54/* bits 31:24 */
55#define L7BLKRST 0x80000000
56#define L6BLKRST 0x40000000
57#define L5BLKRST 0x20000000
58#define L4BLKRST 0x10000000
59#define L3BLKRST 0x08000000
60#define L2BLKRST 0x04000000
61#define L1BLKRST 0x02000000
62#define L0BLKRST 0x01000000
63#define LmBLKRST 0xFF000000
64#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid))
65
66#define OCMBLKRST 0x00400000
67#define CTXMEMBLKRST 0x00200000
68#define CSEQBLKRST 0x00100000
69#define EXSIBLKRST 0x00040000
70#define DPIBLKRST 0x00020000
71#define DFIFBLKRST 0x00010000
72#define HARDRST 0x00000200
73#define COMBLKRST 0x00000100
74#define FRCDFPERR 0x00000080
75#define FRCCIOPERR 0x00000020
76#define FRCBISTERR 0x00000010
77#define COMBISTEN 0x00000004
78#define COMBISTDONE 0x00000002 /* ro */
79#define COMBISTFAIL 0x00000001 /* ro */
80
81#define COMSTAT (REG_BASE_ADDR + 0x04)
82
83#define REQMBXREAD 0x00000040
84#define RSPMBXAVAIL 0x00000020
85#define CSBUFPERR 0x00000008
86#define OVLYERR 0x00000004
87#define CSERR 0x00000002
88#define OVLYDMADONE 0x00000001
89
90#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \
91 CSBUFPERR | OVLYERR | CSERR |\
92 OVLYDMADONE)
93
94#define COMSTATEN (REG_BASE_ADDR + 0x08)
95
96#define EN_REQMBXREAD 0x00000040
97#define EN_RSPMBXAVAIL 0x00000020
98#define EN_CSBUFPERR 0x00000008
99#define EN_OVLYERR 0x00000004
100#define EN_CSERR 0x00000002
101#define EN_OVLYDONE 0x00000001
102
103#define SCBPRO (REG_BASE_ADDR + 0x0C)
104
105#define SCBCONS_MASK 0xFFFF0000
106#define SCBPRO_MASK 0x0000FFFF
107
108#define CHIMREQMBX (REG_BASE_ADDR + 0x10)
109
110#define CHIMRSPMBX (REG_BASE_ADDR + 0x14)
111
112#define CHIMINT (REG_BASE_ADDR + 0x18)
113
114#define EXT_INT0 0x00000800
115#define EXT_INT1 0x00000400
116#define PORRSTDET 0x00000200
117#define HARDRSTDET 0x00000100
118#define DLAVAILQ 0x00000080 /* ro */
119#define HOSTERR 0x00000040
120#define INITERR 0x00000020
121#define DEVINT 0x00000010
122#define COMINT 0x00000008
123#define DEVTIMER2 0x00000004
124#define DEVTIMER1 0x00000002
125#define DLAVAIL 0x00000001
126
127#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\
128 DEVTIMER2 | DEVTIMER1 | DLAVAIL)
129
130#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT)
131
132#define CHIMINTEN (REG_BASE_ADDR + 0x1C)
133
134#define RST_EN_EXT_INT1 0x01000000
135#define RST_EN_EXT_INT0 0x00800000
136#define RST_EN_HOSTERR 0x00400000
137#define RST_EN_INITERR 0x00200000
138#define RST_EN_DEVINT 0x00100000
139#define RST_EN_COMINT 0x00080000
140#define RST_EN_DEVTIMER2 0x00040000
141#define RST_EN_DEVTIMER1 0x00020000
142#define RST_EN_DLAVAIL 0x00010000
143#define SET_EN_EXT_INT1 0x00000100
144#define SET_EN_EXT_INT0 0x00000080
145#define SET_EN_HOSTERR 0x00000040
146#define SET_EN_INITERR 0x00000020
147#define SET_EN_DEVINT 0x00000010
148#define SET_EN_COMINT 0x00000008
149#define SET_EN_DEVTIMER2 0x00000004
150#define SET_EN_DEVTIMER1 0x00000002
151#define SET_EN_DLAVAIL 0x00000001
152
153#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \
154 RST_EN_DEVINT | RST_EN_COMINT | \
155 RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\
156 RST_EN_DLAVAIL)
157
158#define SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\
159 SET_EN_DEVINT | SET_EN_COMINT |\
160 SET_EN_DLAVAIL)
161
162#define OVLYDMACTL (REG_BASE_ADDR + 0x20)
163
164#define OVLYADR_MASK 0x07FF0000
165#define OVLYLSEQ_MASK 0x0000FF00
166#define OVLYCSEQ 0x00000080
167#define OVLYHALTERR 0x00000040
168#define PIOCMODE 0x00000020
169#define RESETOVLYDMA 0x00000008 /* wo */
170#define STARTOVLYDMA 0x00000004
171#define STOPOVLYDMA 0x00000002 /* wo */
172#define OVLYDMAACT 0x00000001 /* ro */
173
174#define OVLYDMACNT (REG_BASE_ADDR + 0x24)
175
176#define OVLYDOMAIN1 0x20000000 /* ro */
177#define OVLYDOMAIN0 0x10000000
178#define OVLYBUFADR_MASK 0x007F0000
179#define OVLYDMACNT_MASK 0x00003FFF
180
181#define OVLYDMAADR (REG_BASE_ADDR + 0x28)
182
183#define DMAERR (REG_BASE_ADDR + 0x30)
184
185#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */
186#define CSERRSTAT_MASK 0x000000FF /* ro */
187
188#define SPIODATA (REG_BASE_ADDR + 0x34)
189
190/* 0x38 - 0x3C are reserved */
191
192#define T1CNTRLR (REG_BASE_ADDR + 0x40)
193
194#define T1DONE 0x00010000 /* ro */
195#define TIMER64 0x00000400
196#define T1ENABLE 0x00000200
197#define T1RELOAD 0x00000100
198#define T1PRESCALER_MASK 0x00000003
199
200#define T1CMPR (REG_BASE_ADDR + 0x44)
201
202#define T1CNTR (REG_BASE_ADDR + 0x48)
203
204#define T2CNTRLR (REG_BASE_ADDR + 0x4C)
205
206#define T2DONE 0x00010000 /* ro */
207#define T2ENABLE 0x00000200
208#define T2RELOAD 0x00000100
209#define T2PRESCALER_MASK 0x00000003
210
211#define T2CMPR (REG_BASE_ADDR + 0x50)
212
213#define T2CNTR (REG_BASE_ADDR + 0x54)
214
215/* 0x58h - 0xFCh are reserved */
216
217/*
218 * DCH_SAS Registers, Address Range : (0x800-0xFFF)
219 */
220#define CMDCTXBASE (REG_BASE_ADDR + 0x800)
221
222#define DEVCTXBASE (REG_BASE_ADDR + 0x808)
223
224#define CTXDOMAIN (REG_BASE_ADDR + 0x810)
225
226#define DEVCTXDOMAIN1 0x00000008 /* ro */
227#define DEVCTXDOMAIN0 0x00000004
228#define CMDCTXDOMAIN1 0x00000002 /* ro */
229#define CMDCTXDOMAIN0 0x00000001
230
231#define DCHCTL (REG_BASE_ADDR + 0x814)
232
233#define OCMBISTREPAIR 0x00080000
234#define OCMBISTEN 0x00040000
235#define OCMBISTDN 0x00020000 /* ro */
236#define OCMBISTFAIL 0x00010000 /* ro */
237#define DDBBISTEN 0x00004000
238#define DDBBISTDN 0x00002000 /* ro */
239#define DDBBISTFAIL 0x00001000 /* ro */
240#define SCBBISTEN 0x00000400
241#define SCBBISTDN 0x00000200 /* ro */
242#define SCBBISTFAIL 0x00000100 /* ro */
243
244#define MEMSEL_MASK 0x000000E0
245#define MEMSEL_CCM_LSEQ 0x00000000
246#define MEMSEL_CCM_IOP 0x00000020
247#define MEMSEL_CCM_SASCTL 0x00000040
248#define MEMSEL_DCM_LSEQ 0x00000060
249#define MEMSEL_DCM_IOP 0x00000080
250#define MEMSEL_OCM 0x000000A0
251
252#define FRCERR 0x00000010
253#define AUTORLS 0x00000001
254
255#define DCHREVISION (REG_BASE_ADDR + 0x818)
256
257#define DCHREVISION_MASK 0x000000FF
258
259#define DCHSTATUS (REG_BASE_ADDR + 0x81C)
260
261#define EN_CFIFTOERR 0x00020000
262#define CFIFTOERR 0x00000200
263#define CSEQINT 0x00000100 /* ro */
264#define LSEQ7INT 0x00000080 /* ro */
265#define LSEQ6INT 0x00000040 /* ro */
266#define LSEQ5INT 0x00000020 /* ro */
267#define LSEQ4INT 0x00000010 /* ro */
268#define LSEQ3INT 0x00000008 /* ro */
269#define LSEQ2INT 0x00000004 /* ro */
270#define LSEQ1INT 0x00000002 /* ro */
271#define LSEQ0INT 0x00000001 /* ro */
272
273#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\
274 LSEQ4INT | LSEQ3INT | LSEQ2INT |\
275 LSEQ1INT | LSEQ0INT)
276
277#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820)
278#define ENFAIRMST 0x00FF0000
279#define DISWRMST9 0x00000200
280#define DISWRMST8 0x00000100
281#define DISRDMST 0x000000FF
282
283#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824)
284/* 8 bit wide */
285#define AUTOINC 0x80
286#define ATOMICERR 0x04
287#define ATOMICWIN 0x02
288#define ATOMICDONE 0x01
289
290
291#define ALTCIOADR (REG_BASE_ADDR + 0x828)
292/* 16 bit; bits 8:0 define CIO addr space of CSEQ */
293
294#define ASCBPTR (REG_BASE_ADDR + 0x82C)
295/* 16 bit wide */
296
297#define ADDBPTR (REG_BASE_ADDR + 0x82E)
298/* 16 bit wide */
299
300#define ANEWDATA (REG_BASE_ADDR + 0x830)
301/* 16 bit */
302
303#define AOLDDATA (REG_BASE_ADDR + 0x834)
304/* 16 bit */
305
306#define CTXACCESS (REG_BASE_ADDR + 0x838)
307/* 32 bit */
308
309/* 0x83Ch - 0xFFCh are reserved */
310
311/*
312 * ARP2 External Processor Registers, Address Range : (0x00-0x1F)
313 */
314#define ARP2CTL 0x00
315
316#define FRCSCRPERR 0x00040000
317#define FRCARP2PERR 0x00020000
318#define FRCARP2ILLOPC 0x00010000
319#define ENWAITTO 0x00008000
320#define PERRORDIS 0x00004000
321#define FAILDIS 0x00002000
322#define CIOPERRDIS 0x00001000
323#define BREAKEN3 0x00000800
324#define BREAKEN2 0x00000400
325#define BREAKEN1 0x00000200
326#define BREAKEN0 0x00000100
327#define EPAUSE 0x00000008
328#define PAUSED 0x00000004 /* ro */
329#define STEP 0x00000002
330#define ARP2RESET 0x00000001 /* wo */
331
332#define ARP2INT 0x04
333
334#define HALTCODE_MASK 0x00FF0000 /* ro */
335#define ARP2WAITTO 0x00000100
336#define ARP2HALTC 0x00000080
337#define ARP2ILLOPC 0x00000040
338#define ARP2PERR 0x00000020
339#define ARP2CIOPERR 0x00000010
340#define ARP2BREAK3 0x00000008
341#define ARP2BREAK2 0x00000004
342#define ARP2BREAK1 0x00000002
343#define ARP2BREAK0 0x00000001
344
345#define ARP2INTEN 0x08
346
347#define EN_ARP2WAITTO 0x00000100
348#define EN_ARP2HALTC 0x00000080
349#define EN_ARP2ILLOPC 0x00000040
350#define EN_ARP2PERR 0x00000020
351#define EN_ARP2CIOPERR 0x00000010
352#define EN_ARP2BREAK3 0x00000008
353#define EN_ARP2BREAK2 0x00000004
354#define EN_ARP2BREAK1 0x00000002
355#define EN_ARP2BREAK0 0x00000001
356
357#define ARP2BREAKADR01 0x0C
358
359#define BREAKADR1_MASK 0x0FFF0000
360#define BREAKADR0_MASK 0x00000FFF
361
362#define ARP2BREAKADR23 0x10
363
364#define BREAKADR3_MASK 0x0FFF0000
365#define BREAKADR2_MASK 0x00000FFF
366
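/*
 * Illustrative sketch (not from the original header): each ARP2BREAKADRxy
 * register packs two 12-bit breakpoint addresses, per the masks above.
 * "addr0" and "addr1" are hypothetical sequencer code addresses.
 */
#if 0
	u32 brk01 = (((u32)addr1 << 16) & BREAKADR1_MASK) |
		    ((u32)addr0 & BREAKADR0_MASK);
#endif
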
367/* 0x14 - 0x1C are reserved */
368
369/*
370 * ARP2 Registers, Address Range : (0x00-0x1F)
371 * The definitions have the same address offset for CSEQ and LmSEQ
372 * CIO Bus Registers.
373 */
374#define MODEPTR 0x00
375
376#define DSTMODE 0xF0
377#define SRCMODE 0x0F
378
379#define ALTMODE 0x01
380
381#define ALTDMODE 0xF0
382#define ALTSMODE 0x0F
383
384#define ATOMICXCHG 0x02
385
386#define FLAG 0x04
387
388#define INTCODE_MASK 0xF0
389#define ALTMODEV2 0x04
390#define CARRY_INT 0x02
391#define CARRY 0x01
392
393#define ARP2INTCTL 0x05
394
395#define PAUSEDIS 0x80
396#define RSTINTCTL 0x40
397#define POPALTMODE 0x08
398#define ALTMODEV 0x04
399#define INTMASK 0x02
400#define IRET 0x01
401
402#define STACK 0x06
403
404#define FUNCTION1 0x07
405
406#define PRGMCNT 0x08
407
408#define ACCUM 0x0A
409
410#define SINDEX 0x0C
411
412#define DINDEX 0x0E
413
414#define ALLONES 0x10
415
416#define ALLZEROS 0x11
417
418#define SINDIR 0x12
419
420#define DINDIR 0x13
421
422#define JUMLDIR 0x14
423
424#define ARP2HALTCODE 0x15
425
426#define CURRADDR 0x16
427
428#define LASTADDR 0x18
429
430#define NXTLADDR 0x1A
431
432#define DBGPORTPTR 0x1C
433
434#define DBGPORT 0x1D
435
436/*
437 * CIO Registers.
438 * The definitions have the same address offset for CSEQ and LmSEQ
439 * CIO Bus Registers.
440 */
441#define MnSCBPTR 0x20
442
443#define MnDDBPTR 0x22
444
445#define SCRATCHPAGE 0x24
446
447#define MnSCRATCHPAGE 0x25
448
449#define SCRATCHPAGESV 0x26
450
451#define MnSCRATCHPAGESV 0x27
452
453#define MnDMAERRS 0x46
454
455#define MnSGDMAERRS 0x47
456
457#define MnSGBUF 0x53
458
459#define MnSGDMASTAT 0x5b
460
461#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */
462
463#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */
464
465#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */
466
467#define MnDMAENG 0x60
468
469#define MnPIPECTL 0x61
470
471#define MnSGBADR 0x65
472
473#define MnSCB_SITE 0x100
474
475#define MnDDB_SITE 0x180
476
477/*
478 * The common definitions below have the same address offset for both
479 * CSEQ and LmSEQ.
480 */
481#define BISTCTL0 0x4C
482
483#define BISTCTL1 0x50
484
485#define MAPPEDSCR 0x800
486
487/*
488 * CSEQ Host Register, Address Range : (0x000-0xFFC)
489 */
490#define CSEQ_HOST_REG_BASE_ADR 0xB8001000
491
492#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL)
493
494#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT)
495
496#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN)
497
498#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01)
499
500#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23)
501
502#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1)
503
504#define CSEQRAMBISTEN 0x00000040
505#define CSEQRAMBISTDN 0x00000020 /* ro */
506#define CSEQRAMBISTFAIL 0x00000010 /* ro */
507#define CSEQSCRBISTEN 0x00000004
508#define CSEQSCRBISTDN 0x00000002 /* ro */
509#define CSEQSCRBISTFAIL 0x00000001 /* ro */
510
511#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR)
512
513/*
514 * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC)
515 * 16 modes, each mode is 512 bytes.
516 * Unless specified, the register should be valid for all modes.
517 */
518#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO
519
520#define CSEQm_CIO_REG(Mode, Reg) \
521 (CSEQ_CIO_REG_BASE_ADR + \
522 ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg))
523
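/*
 * Illustrative sketch (not part of the original header): CSEQm_CIO_REG()
 * selects one CSEQ_MODE_PAGE_SIZE page per mode (that constant is defined
 * elsewhere in this driver) and adds the register offset, which is how the
 * CMn*(Mode) macros further down are built.  For example, mode 2's SCB
 * pointer register:
 */
#if 0
	u32 reg = CSEQm_CIO_REG(2, MnSCBPTR);
	/* == CSEQ_CIO_REG_BASE_ADR + 2 * CSEQ_MODE_PAGE_SIZE + 0x20 */
#endif
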
524#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR)
525
526#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE)
527
528#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG)
529
530#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG)
531
532#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL)
533
534#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK)
535
536#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1)
537
538#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT)
539
540#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM)
541
542#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX)
543
544#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX)
545
546#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES)
547
548#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS)
549
550#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR)
551
552#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR)
553
554#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR)
555
556#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE)
557
558#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR)
559
560#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR)
561
562#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR)
563
564#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR)
565
566#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT)
567
568#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE)
569
570#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR)
571
572#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR)
573
574#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE)
575
576#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28)
577
578#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C)
579
580/* mode 0-7 */
581#define MnREQMBX 0x30
582#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30)
583
584/* mode 8 */
585#define CSEQCON CSEQm_CIO_REG(8, 0x30)
586
587/* mode 0-7 */
588#define MnRSPMBX 0x34
589#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34)
590
591/* mode 8 */
592#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34)
593
594/* mode 8 */
595#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35)
596
597/* mode 8 */
598#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36)
599
600/* mode 8 */
601#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37)
602
603#define CSHALTERR 0x10
604#define RESETCSDMA 0x08 /* wo */
605#define STARTCSDMA 0x04
606#define STOPCSDMA 0x02 /* wo */
607#define CSDMAACT 0x01 /* ro */
608
609/* mode 0-7 */
610#define MnINT 0x38
611#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38)
612
613#define CMnREQMBXE 0x02
614#define CMnRSPMBXF 0x01
615#define CMnINT_MASK 0x00000003
616
617/* mode 8 */
618#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38)
619
620/* mode 0-7 */
621#define MnINTEN 0x3C
622#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C)
623
624#define EN_CMnRSPMBXF 0x01
625
626/* mode 8 */
627#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C)
628
629/* mode 8 */
630#define CSDMAADR CSEQm_CIO_REG(8, 0x40)
631
632/* mode 8 */
633#define CSDMACNT CSEQm_CIO_REG(8, 0x48)
634
635/* mode 8 */
636#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D)
637
638#define DONELISTEND 0x10
639#define DONELISTSIZE_MASK 0x0F
640#define DONELISTSIZE_8ELEM 0x01
641#define DONELISTSIZE_16ELEM 0x02
642#define DONELISTSIZE_32ELEM 0x03
643#define DONELISTSIZE_64ELEM 0x04
644#define DONELISTSIZE_128ELEM 0x05
645#define DONELISTSIZE_256ELEM 0x06
646#define DONELISTSIZE_512ELEM 0x07
647#define DONELISTSIZE_1024ELEM 0x08
648#define DONELISTSIZE_2048ELEM 0x09
649#define DONELISTSIZE_4096ELEM 0x0A
650#define DONELISTSIZE_8192ELEM 0x0B
651#define DONELISTSIZE_16384ELEM 0x0C
652
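/*
 * Illustrative observation (not from the original header): for the
 * encodings listed above the element count is 1 << (field + 2), so the
 * field for a given power-of-two done-list size could be derived as below;
 * ilog2() is assumed to be the usual kernel log2 helper.
 */
#if 0
	u8 dlsize = (ilog2(nelem) - 2) & DONELISTSIZE_MASK; /* nelem = 8..16384 */
#endif
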
653/* mode 8 */
654#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E)
655
656/* mode 11 */
657#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50)
658
659/* mode 11 */
660#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52)
661
662/* mode 11 */
663#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54)
664
665#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60)
666
667#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61)
668
669#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62)
670
671#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64)
672
673#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68)
674
675/* mode 8, 32x32 bits, 128 bytes of mapped buffer */
676#define CSBUFFER CSEQm_CIO_REG(8, 0x80)
677
678#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0)
679
680/* mode 0-8 */
681#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0)
682
683/*
684 * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC)
685 */
686#define CSEQ_RAM_REG_BASE_ADR 0xB8004000
687
688/*
689 * The common definitions below have the same address offset for all the Link
690 * sequencers.
691 */
692#define MODECTL 0x40
693
694#define DBGMODE 0x44
695
696#define CONTROL 0x48
697#define LEDTIMER 0x00010000
698#define LEDTIMERS_10us 0x00000000
699#define LEDTIMERS_1ms 0x00000800
700#define LEDTIMERS_100ms 0x00001000
701#define LEDMODE_TXRX 0x00000000
702#define LEDMODE_CONNECTED 0x00000200
703#define LEDPOL 0x00000100
704
705#define LSEQRAM 0x1000
706
707/*
708 * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC)
709 */
710#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000
711#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000
712#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000
713#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000
714#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000
715#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000
716#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000
717#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000
718
719#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
720 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
721 ARP2CTL)
722
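/*
 * Illustrative sketch (not from the original header): the Lm*(LinkNum)
 * macros index off LSEQ0's base by LmSEQ_HOST_REG_SIZE (defined elsewhere
 * in this driver); the bases listed above are 0x4000 apart, presumably the
 * same stride.  For example, for link 2:
 */
#if 0
	u32 reg = LmARP2CTL(2);
	/* == LSEQ0_HOST_REG_BASE_ADR + 2 * LmSEQ_HOST_REG_SIZE + ARP2CTL,
	 *    i.e. ARP2CTL inside LSEQ2's host register window */
#endif
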
723#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
724 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
725 ARP2INT)
726
727#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
728 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
729 ARP2INTEN)
730
731#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
732 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
733 DBGMODE)
734
735#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
736 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
737 CONTROL)
738
739#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
740 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
741 ARP2BREAKADR01)
742
743#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
744 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
745 ARP2BREAKADR23)
746
747#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
748 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
749 MODECTL)
750
751#define LmAUTODISCI 0x08000000
752#define LmDSBLBITLT 0x04000000
753#define LmDSBLANTT 0x02000000
754#define LmDSBLCRTT 0x01000000
755#define LmDSBLCONT 0x00000100
756#define LmPRIMODE 0x00000080
757#define LmDSBLHOLD 0x00000040
758#define LmDISACK 0x00000020
759#define LmBLIND48 0x00000010
760#define LmRCVMODE_MASK 0x0000000C
761#define LmRCVMODE_PLD 0x00000000
762#define LmRCVMODE_HPC 0x00000004
763
764#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
765 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
766 DBGMODE)
767
768#define LmFRCPERR 0x80000000
769#define LmMEMSEL_MASK 0x30000000
770#define LmFRCRBPERR 0x00000000
771#define LmFRCTBPERR 0x10000000
772#define LmFRCSGBPERR 0x20000000
773#define LmFRCARBPERR 0x30000000
774#define LmRCVIDW 0x00080000
775#define LmINVDWERR 0x00040000
776#define LmRCVDISP 0x00004000
777#define LmDISPERR 0x00002000
778#define LmDSBLDSCR 0x00000800
779#define LmDSBLSCR 0x00000400
780#define LmFRCNAK 0x00000200
781#define LmFRCROFS 0x00000100
782#define LmFRCCRC 0x00000080
783#define LmFRMTYPE_MASK 0x00000070
784#define LmSG_DATA 0x00000000
785#define LmSG_COMMAND 0x00000010
786#define LmSG_TASK 0x00000020
787#define LmSG_TGTXFER 0x00000030
788#define LmSG_RESPONSE 0x00000040
789#define LmSG_IDENADDR 0x00000050
790#define LmSG_OPENADDR 0x00000060
791#define LmDISCRCGEN 0x00000008
792#define LmDISCRCCHK 0x00000004
793#define LmSSXMTFRM 0x00000002
794#define LmSSRCVFRM 0x00000001
795
796#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
797 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
798 CONTROL)
799
800#define LmSTEPXMTFRM 0x00000002
801#define LmSTEPRCVFRM 0x00000001
802
803#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
804 ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \
805 BISTCTL0)
806
807#define ARBBISTEN 0x40000000
808#define ARBBISTDN 0x20000000 /* ro */
809#define ARBBISTFAIL 0x10000000 /* ro */
810#define TBBISTEN 0x00000400
811#define TBBISTDN 0x00000200 /* ro */
812#define TBBISTFAIL 0x00000100 /* ro */
813#define RBBISTEN 0x00000040
814#define RBBISTDN 0x00000020 /* ro */
815#define RBBISTFAIL 0x00000010 /* ro */
816#define SGBISTEN 0x00000004
817#define SGBISTDN 0x00000002 /* ro */
818#define SGBISTFAIL 0x00000001 /* ro */
819
820#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
821 ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\
822 BISTCTL1)
823
824#define LmRAMPAGE1 0x00000200
825#define LmRAMPAGE0 0x00000100
826#define LmIMEMBISTEN 0x00000040
827#define LmIMEMBISTDN 0x00000020 /* ro */
828#define LmIMEMBISTFAIL 0x00000010 /* ro */
829#define LmSCRBISTEN 0x00000004
830#define LmSCRBISTDN 0x00000002 /* ro */
831#define LmSCRBISTFAIL 0x00000001 /* ro */
832#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0)
833#define LmRAMPAGE_LSHIFT 0x8
834
835#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
836 ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
837 MAPPEDSCR)
838
839#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \
840 ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\
841 LSEQRAM)
842
843/*
844 * LmSEQ CIO Bus Registers, Address Range : (0x0000-0xFFC)
845 * 8 modes, each mode is 512 bytes.
846 * Unless specified, the register should be valid for all modes.
847 */
848#define LmSEQ_CIOBUS_REG_BASE 0x2000
849
850#define LmSEQ_PHY_BASE(Mode, LinkNum) \
851 (LSEQ0_HOST_REG_BASE_ADR + \
852 (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \
853 LmSEQ_CIOBUS_REG_BASE + \
854 ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE))
855
856#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \
857 (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg))
858
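/*
 * Illustrative sketch (not from the original header): LmSEQ_PHY_REG() first
 * finds the link's CIO window via LmSEQ_PHY_BASE(), then adds one
 * LmSEQ_MODE_PAGE_SIZE page per mode (both size constants are defined
 * elsewhere in this driver).  For example, mode 5 (phy/OOB) of link 3,
 * using the OOB_STATUS offset defined further down:
 */
#if 0
	u32 reg = LmSEQ_PHY_REG(5, 3, OOB_STATUS);
	/* == LSEQ0_HOST_REG_BASE_ADR + 3 * LmSEQ_HOST_REG_SIZE +
	 *    LmSEQ_CIOBUS_REG_BASE + 5 * LmSEQ_MODE_PAGE_SIZE + OOB_STATUS */
#endif
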
859#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR)
860
861#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE)
862
863#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG)
864
865#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG)
866
867#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL)
868
869#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK)
870
871#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1)
872
873#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT)
874
875#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM)
876
877#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX)
878
879#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX)
880
881#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES)
882
883#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS)
884
885#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR)
886
887#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR)
888
889#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR)
890
891#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE)
892
893#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR)
894
895#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR)
896
897#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR)
898
899#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR)
900
901#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT)
902
903#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE)
904
905#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \
906 MnSCRATCHPAGE)
907
908#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28)
909
910#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30)
911
912#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34)
913
914#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38)
915
916#define CTXMEMSIZE 0x80000000 /* ro */
917#define LmACKREQ 0x08000000
918#define LmNAKREQ 0x04000000
919#define LmMnXMTERR 0x02000000
920#define LmM5OOBSVC 0x01000000
921#define LmHWTINT 0x00800000
922#define LmMnCTXDONE 0x00100000
923#define LmM2REQMBXF 0x00080000
924#define LmM2RSPMBXE 0x00040000
925#define LmMnDMAERR 0x00020000
926#define LmRCVPRIM 0x00010000
927#define LmRCVERR 0x00008000
928#define LmADDRRCV 0x00004000
929#define LmMnHDRMISS 0x00002000
930#define LmMnWAITSCB 0x00001000
931#define LmMnRLSSCB 0x00000800
932#define LmMnSAVECTX 0x00000400
933#define LmMnFETCHSG 0x00000200
934#define LmMnLOADCTX 0x00000100
935#define LmMnCFGICL 0x00000080
936#define LmMnCFGSATA 0x00000040
937#define LmMnCFGEXPSATA 0x00000020
938#define LmMnCFGCMPLT 0x00000010
939#define LmMnCFGRBUF 0x00000008
940#define LmMnSAVETTR 0x00000004
941#define LmMnCFGRDAT 0x00000002
942#define LmMnCFGHDR 0x00000001
943
944#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C)
945
946#define EN_LmACKREQ 0x08000000
947#define EN_LmNAKREQ 0x04000000
948#define EN_LmMnXMTERR 0x02000000
949#define EN_LmM5OOBSVC 0x01000000
950#define EN_LmHWTINT 0x00800000
951#define EN_LmMnCTXDONE 0x00100000
952#define EN_LmM2REQMBXF 0x00080000
953#define EN_LmM2RSPMBXE 0x00040000
954#define EN_LmMnDMAERR 0x00020000
955#define EN_LmRCVPRIM 0x00010000
956#define EN_LmRCVERR 0x00008000
957#define EN_LmADDRRCV 0x00004000
958#define EN_LmMnHDRMISS 0x00002000
959#define EN_LmMnWAITSCB 0x00001000
960#define EN_LmMnRLSSCB 0x00000800
961#define EN_LmMnSAVECTX 0x00000400
962#define EN_LmMnFETCHSG 0x00000200
963#define EN_LmMnLOADCTX 0x00000100
964#define EN_LmMnCFGICL 0x00000080
965#define EN_LmMnCFGSATA 0x00000040
966#define EN_LmMnCFGEXPSATA 0x00000020
967#define EN_LmMnCFGCMPLT 0x00000010
968#define EN_LmMnCFGRBUF 0x00000008
969#define EN_LmMnSAVETTR 0x00000004
970#define EN_LmMnCFGRDAT 0x00000002
971#define EN_LmMnCFGHDR 0x00000001
972
973#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \
974 EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \
975 EN_LmMnCFGHDR | EN_LmRCVERR | \
976 EN_LmADDRRCV | EN_LmMnHDRMISS | \
977 EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
978 EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
979 EN_LmHWTINT | EN_LmMnCTXDONE | \
980 EN_LmRCVPRIM | EN_LmMnCFGSATA | \
981 EN_LmMnCFGEXPSATA | EN_LmMnDMAERR)
982
983#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \
984 EN_LmMnRLSSCB | EN_LmMnSAVECTX | \
985 EN_LmMnFETCHSG | EN_LmMnLOADCTX | \
986 EN_LmMnXMTERR | EN_LmHWTINT | \
987 EN_LmMnCTXDONE | EN_LmRCVPRIM | \
988 EN_LmRCVERR | EN_LmMnDMAERR)
989
990#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \
991 EN_LmM2REQMBXF | EN_LmRCVPRIM | \
992 EN_LmRCVERR)
993
994#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \
995 EN_LmHWTINT | EN_LmRCVPRIM | \
996 EN_LmRCVERR)
997
998#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40)
999
1000#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44)
1001
1002#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45)
1003
1004#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46)
1005
1006#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47)
1007
1008#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48)
1009
1010#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48)
1011#define SAS_ALIGN_DEFAULT 0xFF
1012
1013#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49)
1014
1015#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49)
1016#define STP_ALIGN_DEFAULT 0x1F
1017
1018#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A)
1019
1020#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A)
1021
1022#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B)
1023
1024#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B)
1025
1026#define LmDISALIGN 0x20
1027#define LmROTSTPALIGN 0x10
1028#define LmSTPALIGN 0x08
1029#define LmROTNOTIFY 0x04
1030#define LmDUALALIGN 0x02
1031#define LmROTALIGN 0x01
1032
1033#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C)
1034
1035#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C)
1036
1037#define LmMnBUFSTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x4E)
1038
1039#define LmMnBUFPERR 0x01
1040
1041/* mode 0-1 */
1042#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59)
1043
1044#define LmMnXFRLVL_128 0x05
1045#define LmMnXFRLVL_256 0x04
1046#define LmMnXFRLVL_512 0x03
1047#define LmMnXFRLVL_1024 0x02
1048#define LmMnXFRLVL_1536 0x01
1049#define LmMnXFRLVL_2048 0x00
1050
1051 /* mode 0-1 */
1052#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A)
1053
1054#define LmMnRESETSG 0x04
1055#define LmMnSTOPSG 0x02
1056#define LmMnSTARTSG 0x01
1057
1058/* mode 0-1 */
1059#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B)
1060
1061/* mode 0-1 */
1062#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C)
1063
1064#define LmMnFLUSH 0x40 /* wo */
1065#define LmMnRLSRTRY 0x20 /* wo */
1066#define LmMnDISCARD 0x10 /* wo */
1067#define LmMnRESETDAT 0x08 /* wo */
1068#define LmMnSUSDAT 0x04 /* wo */
1069#define LmMnSTOPDAT 0x02 /* wo */
1070#define LmMnSTARTDAT 0x01 /* wo */
1071
1072/* mode 0-1 */
1073#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D)
1074
1075#define LmMnDPEMPTY 0x80
1076#define LmMnFLUSHING 0x40
1077#define LmMnDDMAREQ 0x20
1078#define LmMnHDMAREQ 0x10
1079#define LmMnDATFREE 0x08
1080#define LmMnDATSUS 0x04
1081#define LmMnDATACT 0x02
1082#define LmMnDATEN 0x01
1083
1084/* mode 0-1 */
1085#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E)
1086
1087#define LmMnDMATYPE_NORMAL 0x0000
1088#define LmMnDMATYPE_HOST_ONLY_TX 0x0001
1089#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002
1090#define LmMnDMATYPE_INVALID 0x0003
1091#define LmMnDMATYPE_MASK 0x0003
1092
1093#define LmMnDMAWRAP 0x0004
1094#define LmMnBITBUCKET 0x0008
1095#define LmMnDISHDR 0x0010
1096#define LmMnSTPCRC 0x0020
1097#define LmXTEST 0x0040
1098#define LmMnDISCRC 0x0080
1099#define LmMnENINTLK 0x0100
1100#define LmMnADDRFRM 0x0400
1101#define LmMnENXMTCRC 0x0800
1102
1103/* mode 0-1 */
1104#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70)
1105
1106/* mode 0-1 */
1107#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B)
1108#define LmMnDPSEL_MASK 0x07
1109#define LmMnEOLPRE 0x40
1110#define LmMnEOSPRE 0x80
1111
1112/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
1113/* Receive Mode n = 0 */
1114#define LmMnHRADDR 0x00
1115#define LmMnHBYTECNT 0x01
1116#define LmMnHREWIND 0x02
1117#define LmMnDWADDR 0x03
1118#define LmMnDSPACECNT 0x04
1119#define LmMnDFRMSIZE 0x05
1120
1121/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */
1122/* Transmit Mode n = 1 */
1123#define LmMnHWADDR 0x00
1124#define LmMnHSPACECNT 0x01
1125/* #define LmMnHREWIND 0x02 */
1126#define LmMnDRADDR 0x03
1127#define LmMnDBYTECNT 0x04
1128/* #define LmMnDFRMSIZE 0x05 */
1129
1130/* mode 0-1 */
1131#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78)
1132#define LmMnDPACC_MASK 0x00FFFFFF
1133
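/*
 * Illustrative sketch (an assumption, not stated in the original header):
 * the LmMnHRADDR..LmMnDFRMSIZE codes above appear to be selector values for
 * LmMnDPSEL, with the selected quantity then read back through LmMnDPACC.
 * read_reg8()/write_reg8()/read_reg32() are placeholders for whatever MMIO
 * accessors the driver actually provides.
 */
#if 0
	u8 sel = read_reg8(LmMnDPSEL(link, 0)) & ~LmMnDPSEL_MASK;
	write_reg8(LmMnDPSEL(link, 0), sel | LmMnHRADDR);
	host_raddr = read_reg32(LmMnDPACC(link, 0)) & LmMnDPACC_MASK;
#endif
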
1134/* mode 0-1 */
1135#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D)
1136
1137#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80)
1138#define LmPRMSTAT0BYTE0 0x80
1139#define LmPRMSTAT0BYTE1 0x81
1140#define LmPRMSTAT0BYTE2 0x82
1141#define LmPRMSTAT0BYTE3 0x83
1142
1143#define LmFRAMERCVD 0x80000000
1144#define LmXFRRDYRCVD 0x40000000
1145#define LmUNKNOWNP 0x20000000
1146#define LmBREAK 0x10000000
1147#define LmDONE 0x08000000
1148#define LmOPENACPT 0x04000000
1149#define LmOPENRJCT 0x02000000
1150#define LmOPENRTRY 0x01000000
1151#define LmCLOSERV1 0x00800000
1152#define LmCLOSERV0 0x00400000
1153#define LmCLOSENORM 0x00200000
1154#define LmCLOSECLAF 0x00100000
1155#define LmNOTIFYRV2 0x00080000
1156#define LmNOTIFYRV1 0x00040000
1157#define LmNOTIFYRV0 0x00020000
1158#define LmNOTIFYSPIN 0x00010000
1159#define LmBROADRV4 0x00008000
1160#define LmBROADRV3 0x00004000
1161#define LmBROADRV2 0x00002000
1162#define LmBROADRV1 0x00001000
1163#define LmBROADSES 0x00000800
1164#define LmBROADRVCH1 0x00000400
1165#define LmBROADRVCH0 0x00000200
1166#define LmBROADCH 0x00000100
1167#define LmAIPRVWP 0x00000080
1168#define LmAIPWP 0x00000040
1169#define LmAIPWD 0x00000020
1170#define LmAIPWC 0x00000010
1171#define LmAIPRV2 0x00000008
1172#define LmAIPRV1 0x00000004
1173#define LmAIPRV0 0x00000002
1174#define LmAIPNRML 0x00000001
1175
1176#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \
1177 LmBROADRVCH1)
1178
1179#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84)
1180#define LmPRMSTAT1BYTE0 0x84
1181#define LmPRMSTAT1BYTE1 0x85
1182#define LmPRMSTAT1BYTE2 0x86
1183#define LmPRMSTAT1BYTE3 0x87
1184
1185#define LmFRMRCVDSTAT 0x80000000
1186#define LmBREAK_DET 0x04000000
1187#define LmCLOSE_DET 0x02000000
1188#define LmDONE_DET 0x01000000
1189#define LmXRDY 0x00040000
1190#define LmSYNCSRST 0x00020000
1191#define LmSYNC 0x00010000
1192#define LmXHOLD 0x00008000
1193#define LmRRDY 0x00004000
1194#define LmHOLD 0x00002000
1195#define LmROK 0x00001000
1196#define LmRIP 0x00000800
1197#define LmCRBLK 0x00000400
1198#define LmACK 0x00000200
1199#define LmNAK 0x00000100
1200#define LmHARDRST 0x00000080
1201#define LmERROR 0x00000040
1202#define LmRERR 0x00000020
1203#define LmPMREQP 0x00000010
1204#define LmPMREQS 0x00000008
1205#define LmPMACK 0x00000004
1206#define LmPMNAK 0x00000002
1207#define LmDMAT 0x00000001
1208
1209/* mode 1 */
1210#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E)
1211#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93)
1212
1213/* mode 0 */
1214#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0)
1215
1216#define LmACRCERR 0x00000800
1217#define LmPHYOVRN 0x00000400
1218#define LmOBOVRN 0x00000200
1219#define LmMnZERODATA 0x00000100
1220#define LmSATAINTLK 0x00000080
1221#define LmMnCRCERR 0x00000020
1222#define LmRRDYOVRN 0x00000010
1223#define LmMISSSOAF 0x00000008
1224#define LmMISSSOF 0x00000004
1225#define LmMISSEOAF 0x00000002
1226#define LmMISSEOF 0x00000001
1227
1228#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4)
1229
1230#define EN_LmACRCERR 0x00000800
1231#define EN_LmPHYOVRN 0x00000400
1232#define EN_LmOBOVRN 0x00000200
1233#define EN_LmMnZERODATA 0x00000100
1234#define EN_LmSATAINTLK 0x00000080
1235#define EN_LmFRMBAD 0x00000040
1236#define EN_LmMnCRCERR 0x00000020
1237#define EN_LmRRDYOVRN 0x00000010
1238#define EN_LmMISSSOAF 0x00000008
1239#define EN_LmMISSSOF 0x00000004
1240#define EN_LmMISSEOAF 0x00000002
1241#define EN_LmMISSEOF 0x00000001
1242
1243#define LmFRMERREN_MASK (EN_LmSATAINTLK | EN_LmMnCRCERR | \
1244 EN_LmRRDYOVRN | EN_LmMISSSOF | \
1245 EN_LmMISSEOAF | EN_LmMISSEOF | \
1246					 EN_LmACRCERR | EN_LmPHYOVRN | \
1247 EN_LmOBOVRN | EN_LmMnZERODATA)
1248
1249#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5)
1250
1251#define EN_LmDONETO 0x80
1252#define EN_LmINVDISP 0x40
1253#define EN_LmINVDW 0x20
1254#define EN_LmDWSEVENT 0x08
1255#define EN_LmCRTTTO 0x04
1256#define EN_LmANTTTO 0x02
1257#define EN_LmBITLTTO 0x01
1258
1259#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \
1260 EN_LmDWSEVENT | EN_LmCRTTTO | \
1261 EN_LmANTTTO | EN_LmDONETO | \
1262 EN_LmBITLTTO)
1263
1264#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7)
1265
1266#define LmDONETO 0x80
1267#define LmINVDISP 0x40
1268#define LmINVDW 0x20
1269#define LmDWSEVENT 0x08
1270#define LmCRTTTO 0x04
1271#define LmANTTTO 0x02
1272#define LmBITLTTO 0x01
1273
1274#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8)
1275#define LmDATABUFADR_MASK 0x0FFF
1276
1277#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA)
1278
1279#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0)
1280
1281#define EN_LmUNKNOWNP 0x20000000
1282#define EN_LmBREAK 0x10000000
1283#define EN_LmDONE 0x08000000
1284#define EN_LmOPENACPT 0x04000000
1285#define EN_LmOPENRJCT 0x02000000
1286#define EN_LmOPENRTRY 0x01000000
1287#define EN_LmCLOSERV1 0x00800000
1288#define EN_LmCLOSERV0 0x00400000
1289#define EN_LmCLOSENORM 0x00200000
1290#define EN_LmCLOSECLAF 0x00100000
1291#define EN_LmNOTIFYRV2 0x00080000
1292#define EN_LmNOTIFYRV1 0x00040000
1293#define EN_LmNOTIFYRV0 0x00020000
1294#define EN_LmNOTIFYSPIN 0x00010000
1295#define EN_LmBROADRV4 0x00008000
1296#define EN_LmBROADRV3 0x00004000
1297#define EN_LmBROADRV2 0x00002000
1298#define EN_LmBROADRV1 0x00001000
1299#define EN_LmBROADRV0 0x00000800
1300#define EN_LmBROADRVCH1 0x00000400
1301#define EN_LmBROADRVCH0 0x00000200
1302#define EN_LmBROADCH 0x00000100
1303#define EN_LmAIPRVWP 0x00000080
1304#define EN_LmAIPWP 0x00000040
1305#define EN_LmAIPWD 0x00000020
1306#define EN_LmAIPWC 0x00000010
1307#define EN_LmAIPRV2 0x00000008
1308#define EN_LmAIPRV1 0x00000004
1309#define EN_LmAIPRV0 0x00000002
1310#define EN_LmAIPNRML 0x00000001
1311
1312#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \
1313 EN_LmDONE | EN_LmOPENACPT | \
1314 EN_LmOPENRJCT | EN_LmOPENRTRY | \
1315 EN_LmCLOSERV1 | EN_LmCLOSERV0 | \
1316 EN_LmCLOSENORM | EN_LmCLOSECLAF | \
1317 EN_LmBROADRV4 | EN_LmBROADRV3 | \
1318 EN_LmBROADRV2 | EN_LmBROADRV1 | \
1319 EN_LmBROADRV0 | EN_LmBROADRVCH1 | \
1320 EN_LmBROADRVCH0 | EN_LmBROADCH | \
1321 EN_LmAIPRVWP | EN_LmAIPWP | \
1322 EN_LmAIPWD | EN_LmAIPWC | \
1323 EN_LmAIPRV2 | EN_LmAIPRV1 | \
1324 EN_LmAIPRV0 | EN_LmAIPNRML)
1325
1326#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4)
1327
1328#define EN_LmXRDY 0x00040000
1329#define EN_LmSYNCSRST 0x00020000
1330#define EN_LmSYNC 0x00010000
1331#define EN_LmXHOLD 0x00008000
1332#define EN_LmRRDY 0x00004000
1333#define EN_LmHOLD 0x00002000
1334#define EN_LmROK 0x00001000
1335#define EN_LmRIP 0x00000800
1336#define EN_LmCRBLK 0x00000400
1337#define EN_LmACK 0x00000200
1338#define EN_LmNAK 0x00000100
1339#define EN_LmHARDRST 0x00000080
1340#define EN_LmERROR 0x00000040
1341#define EN_LmRERR 0x00000020
1342#define EN_LmPMREQP 0x00000010
1343#define EN_LmPMREQS 0x00000008
1344#define EN_LmPMACK 0x00000004
1345#define EN_LmPMNAK 0x00000002
1346#define EN_LmDMAT 0x00000001
1347
1348#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \
1349 EN_LmSYNCSRST | \
1350 EN_LmPMREQP | EN_LmPMREQS | \
1351 EN_LmPMACK | EN_LmPMNAK)
1352
1353#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8)
1354
1355#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC)
1356
1357#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0)
1358
1359
1360/*
1361 * LmSEQ CIO Bus Mode 3 Register.
1362 * Mode 3: Configuration and Setup, IOP Context SCB.
1363 */
1364#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48)
1365
1366#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90)
1367
1368#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92)
1369
1370#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94)
1371
1372#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96)
1373
1374#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98)
1375
1376#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A)
1377
1378#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C)
1379
1380#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E)
1381
1382#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4)
1383
1384#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6)
1385
1386#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0)
1387
1388#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4)
1389
1390#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2)
1391
1392#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8)
1393
1394
1395/*
1396 * LmSEQ CIO Bus Mode 5 Registers.
1397 * Mode 5: Phy/OOB Control and Status.
1398 */
1399#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg))
1400
1401#define OOB_BFLTR 0x100
1402
1403#define BFLTR_THR_MASK 0xF0
1404#define BFLTR_TC_MASK 0x0F
1405
1406#define OOB_INIT_MIN 0x102
1407
1408#define OOB_INIT_MAX 0x104
1409
1410#define OOB_INIT_NEG 0x106
1411
1412#define OOB_SAS_MIN 0x108
1413
1414#define OOB_SAS_MAX 0x10A
1415
1416#define OOB_SAS_NEG 0x10C
1417
1418#define OOB_WAKE_MIN 0x10E
1419
1420#define OOB_WAKE_MAX 0x110
1421
1422#define OOB_WAKE_NEG 0x112
1423
1424#define OOB_IDLE_MAX 0x114
1425
1426#define OOB_BURST_MAX 0x116
1427
1428#define OOB_DATA_KBITS 0x126
1429
1430#define OOB_ALIGN_0_DATA 0x12C
1431
1432#define OOB_ALIGN_1_DATA 0x130
1433
1434#define D10_2_DATA_k 0x00
1435#define SYNC_DATA_k 0x02
1436#define ALIGN_1_DATA_k 0x04
1437#define ALIGN_0_DATA_k 0x08
1438#define BURST_DATA_k 0x10
1439
1440#define OOB_PHY_RESET_COUNT 0x13C
1441
1442#define OOB_SIG_GEN 0x140
1443
1444#define START_OOB 0x80
1445#define START_DWS 0x40
1446#define ALIGN_CNT3 0x30
1447#define ALIGN_CNT2 0x20
1448#define ALIGN_CNT1 0x10
1449#define ALIGN_CNT4 0x00
1450#define STOP_DWS 0x08
1451#define SEND_COMSAS 0x04
1452#define SEND_COMINIT 0x02
1453#define SEND_COMWAKE 0x01
1454
1455#define OOB_XMIT 0x141
1456
1457#define TX_ENABLE 0x80
1458#define XMIT_OOB_BURST 0x10
1459#define XMIT_D10_2 0x08
1460#define XMIT_SYNC 0x04
1461#define XMIT_ALIGN_1 0x02
1462#define XMIT_ALIGN_0 0x01
1463
1464#define FUNCTION_MASK 0x142
1465
1466#define SAS_MODE_DIS 0x80
1467#define SATA_MODE_DIS 0x40
1468#define SPINUP_HOLD_DIS 0x20
1469#define HOT_PLUG_DIS 0x10
1470#define SATA_PS_DIS 0x08
1471#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS)
1472
1473#define OOB_MODE 0x143
1474
1475#define SAS_MODE 0x80
1476#define SATA_MODE 0x40
1477#define SLOW_CLK 0x20
1478#define FORCE_XMIT_15 0x08
1479#define PHY_SPEED_60 0x04
1480#define PHY_SPEED_30 0x02
1481#define PHY_SPEED_15 0x01
1482
1483#define CURRENT_STATUS 0x144
1484
1485#define CURRENT_OOB_DONE 0x80
1486#define CURRENT_LOSS_OF_SIGNAL 0x40
1487#define CURRENT_SPINUP_HOLD 0x20
1488#define CURRENT_HOT_PLUG_CNCT 0x10
1489#define CURRENT_GTO_TIMEOUT 0x08
1490#define CURRENT_OOB_TIMEOUT 0x04
1491#define CURRENT_DEVICE_PRESENT 0x02
1492#define CURRENT_OOB_ERROR 0x01
1493
1494#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \
1495 CURRENT_GTO_TIMEOUT)
1496
1497#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \
1498 CURRENT_OOB_ERROR)
1499
1500#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \
1501 CURRENT_HOT_PLUG_CNCT | \
1502 CURRENT_DEVICE_PRESENT)
1503
1504#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \
1505 CURRENT_DEVICE_PRESENT)
1506
1507#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL
1508
1509#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \
1510 CURRENT_LOSS_OF_SIGNAL | \
1511 CURRENT_SPINUP_HOLD | \
1512 CURRENT_HOT_PLUG_CNCT | \
1513 CURRENT_GTO_TIMEOUT | \
1514 CURRENT_DEVICE_PRESENT | \
1515 CURRENT_OOB_ERROR )
1516
1517#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \
1518 CURRENT_GTO_TIMEOUT | \
1519 CURRENT_OOB_TIMEOUT | \
1520 CURRENT_OOB_ERROR )
1521
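/*
 * Illustrative sketch (not from the original header): the composite masks
 * above suggest arrival/removal detection by masking the CURRENT_STATUS
 * byte and comparing.  "cur" is a hypothetical variable holding that
 * register's value for a given phy.
 */
#if 0
	u8 state = cur & CURRENT_PHY_MASK;

	if (state == DEVICE_ADDED_W_CNT || state == DEVICE_ADDED_WO_CNT)
		; /* a device has been detected on this phy */
	else if (cur & DEVICE_REMOVED)
		; /* loss of signal: device removed */
	else if (cur & CURRENT_ERR_MASK)
		; /* OOB error condition */
#endif
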
1522#define SPEED_MASK 0x145
1523
1524#define SATA_SPEED_30_DIS 0x10
1525#define SATA_SPEED_15_DIS 0x08
1526#define SAS_SPEED_60_DIS 0x04
1527#define SAS_SPEED_30_DIS 0x02
1528#define SAS_SPEED_15_DIS 0x01
1529#define SAS_SPEED_MASK_DEFAULT 0x00
1530
1531#define OOB_TIMER_ENABLE 0x14D
1532
1533#define HOT_PLUG_EN 0x80
1534#define RCD_EN 0x40
1535#define COMTIMER_EN 0x20
1536#define SNTT_EN 0x10
1537#define SNLT_EN 0x04
1538#define SNWT_EN 0x02
1539#define ALIGN_EN 0x01
1540
1541#define OOB_STATUS 0x14E
1542
1543#define OOB_DONE 0x80
1544#define LOSS_OF_SIGNAL 0x40 /* ro */
1545#define SPINUP_HOLD 0x20
1546#define HOT_PLUG_CNCT 0x10 /* ro */
1547#define GTO_TIMEOUT 0x08 /* ro */
1548#define OOB_TIMEOUT 0x04 /* ro */
1549#define DEVICE_PRESENT 0x02 /* ro */
1550#define OOB_ERROR 0x01 /* ro */
1551
1552#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \
1553 OOB_TIMEOUT | OOB_ERROR)
1554
1555#define OOB_STATUS_CLEAR 0x14F
1556
1557#define OOB_DONE_CLR 0x80
1558#define LOSS_OF_SIGNAL_CLR 0x40
1559#define SPINUP_HOLD_CLR 0x20
1560#define HOT_PLUG_CNCT_CLR 0x10
1561#define GTO_TIMEOUT_CLR 0x08
1562#define OOB_TIMEOUT_CLR 0x04
1563#define OOB_ERROR_CLR 0x01
1564
1565#define HOT_PLUG_DELAY 0x150
1566/* In 5 ms units. 20 = 100 ms. */
1567#define HOTPLUG_DELAY_TIMEOUT 20
1568
1569
1570#define INT_ENABLE_2 0x15A
1571
1572#define OOB_DONE_EN 0x80
1573#define LOSS_OF_SIGNAL_EN 0x40
1574#define SPINUP_HOLD_EN 0x20
1575#define HOT_PLUG_CNCT_EN 0x10
1576#define GTO_TIMEOUT_EN 0x08
1577#define OOB_TIMEOUT_EN 0x04
1578#define DEVICE_PRESENT_EN 0x02
1579#define OOB_ERROR_EN 0x01
1580
1581#define PHY_CONTROL_0 0x160
1582
1583#define PHY_LOWPWREN_TX 0x80
1584#define PHY_LOWPWREN_RX 0x40
1585#define SPARE_REG_160_B5 0x20
1586#define OFFSET_CANCEL_RX 0x10
1587
1588/* bits 3:2 */
1589#define PHY_RXCOMCENTER_60V 0x00
1590#define PHY_RXCOMCENTER_70V 0x04
1591#define PHY_RXCOMCENTER_80V 0x08
1592#define PHY_RXCOMCENTER_90V 0x0C
1593#define PHY_RXCOMCENTER_MASK 0x0C
1594
1595#define PHY_RESET 0x02
1596#define SAS_DEFAULT_SEL 0x01
1597
1598#define PHY_CONTROL_1 0x161
1599
1600/* bits 2:0 */
1601#define SATA_PHY_DETLEVEL_50mv 0x00
1602#define SATA_PHY_DETLEVEL_75mv 0x01
1603#define SATA_PHY_DETLEVEL_100mv 0x02
1604#define SATA_PHY_DETLEVEL_125mv 0x03
1605#define SATA_PHY_DETLEVEL_150mv 0x04
1606#define SATA_PHY_DETLEVEL_175mv 0x05
1607#define SATA_PHY_DETLEVEL_200mv 0x06
1608#define SATA_PHY_DETLEVEL_225mv 0x07
1609#define SATA_PHY_DETLEVEL_MASK 0x07
1610
1611/* bits 5:3 */
1612#define SAS_PHY_DETLEVEL_50mv 0x00
1613#define SAS_PHY_DETLEVEL_75mv 0x08
1614#define SAS_PHY_DETLEVEL_100mv 0x10
1615#define SAS_PHY_DETLEVEL_125mv 0x11
1616#define SAS_PHY_DETLEVEL_150mv 0x20
1617#define SAS_PHY_DETLEVEL_175mv 0x21
1618#define SAS_PHY_DETLEVEL_200mv 0x30
1619#define SAS_PHY_DETLEVEL_225mv 0x31
1620#define SAS_PHY_DETLEVEL_MASK 0x38
1621
1622#define PHY_CONTROL_2 0x162
1623
1624/* bits 7:5 */
1625#define SATA_PHY_DRV_400mv 0x00
1626#define SATA_PHY_DRV_450mv 0x20
1627#define SATA_PHY_DRV_500mv 0x40
1628#define SATA_PHY_DRV_550mv 0x60
1629#define SATA_PHY_DRV_600mv 0x80
1630#define SATA_PHY_DRV_650mv 0xA0
1631#define SATA_PHY_DRV_725mv 0xC0
1632#define SATA_PHY_DRV_800mv 0xE0
1633#define SATA_PHY_DRV_MASK 0xE0
1634
1635/* bits 4:3 */
1636#define SATA_PREEMP_0 0x00
1637#define SATA_PREEMP_1 0x08
1638#define SATA_PREEMP_2 0x10
1639#define SATA_PREEMP_3 0x18
1640#define SATA_PREEMP_MASK 0x18
1641
1642#define SATA_CMSH1P5 0x04
1643
1644/* bits 1:0 */
1645#define SATA_SLEW_0 0x00
1646#define SATA_SLEW_1 0x01
1647#define SATA_SLEW_2 0x02
1648#define SATA_SLEW_3 0x03
1649#define SATA_SLEW_MASK 0x03
1650
1651#define PHY_CONTROL_3 0x163
1652
1653/* bits 7:5 */
1654#define SAS_PHY_DRV_400mv 0x00
1655#define SAS_PHY_DRV_450mv 0x20
1656#define SAS_PHY_DRV_500mv 0x40
1657#define SAS_PHY_DRV_550mv 0x60
1658#define SAS_PHY_DRV_600mv 0x80
1659#define SAS_PHY_DRV_650mv 0xA0
1660#define SAS_PHY_DRV_725mv 0xC0
1661#define SAS_PHY_DRV_800mv 0xE0
1662#define SAS_PHY_DRV_MASK 0xE0
1663
1664/* bits 4:3 */
1665#define SAS_PREEMP_0 0x00
1666#define SAS_PREEMP_1 0x08
1667#define SAS_PREEMP_2 0x10
1668#define SAS_PREEMP_3 0x18
1669#define SAS_PREEMP_MASK 0x18
1670
1671#define SAS_CMSH1P5 0x04
1672
1673/* bits 1:0 */
1674#define SAS_SLEW_0 0x00
1675#define SAS_SLEW_1 0x01
1676#define SAS_SLEW_2 0x02
1677#define SAS_SLEW_3 0x03
1678#define SAS_SLEW_MASK 0x03
1679
1680#define PHY_CONTROL_4 0x168
1681
1682#define PHY_DONE_CAL_TX 0x80
1683#define PHY_DONE_CAL_RX 0x40
1684#define RX_TERM_LOAD_DIS 0x20
1685#define TX_TERM_LOAD_DIS 0x10
1686#define AUTO_TERM_CAL_DIS 0x08
1687#define PHY_SIGDET_FLTR_EN 0x04
1688#define OSC_FREQ 0x02
1689#define PHY_START_CAL 0x01
1690
1691/*
1692 * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
1693 */
1694#define PCIX_REG_BASE_ADR 0xB8040000
1695
1696#define PCIC_VENDOR_ID 0x00
1697
1698#define PCIC_DEVICE_ID 0x02
1699
1700#define PCIC_COMMAND 0x04
1701
1702#define INT_DIS 0x0400
1703#define FBB_EN 0x0200 /* ro */
1704#define SERR_EN 0x0100
1705#define STEP_EN 0x0080 /* ro */
1706#define PERR_EN 0x0040
1707#define VGA_EN 0x0020 /* ro */
1708#define MWI_EN 0x0010
1709#define SPC_EN 0x0008
1710#define MST_EN 0x0004
1711#define MEM_EN 0x0002
1712#define IO_EN 0x0001
1713
1714#define PCIC_STATUS 0x06
1715
1716#define PERR_DET 0x8000
1717#define SERR_GEN 0x4000
1718#define MABT_DET 0x2000
1719#define TABT_DET 0x1000
1720#define TABT_GEN 0x0800
1721#define DPERR_DET 0x0100
1722#define CAP_LIST 0x0010
1723#define INT_STAT 0x0008
1724
1725#define PCIC_DEVREV_ID 0x08
1726
1727#define PCIC_CLASS_CODE 0x09
1728
1729#define PCIC_CACHELINE_SIZE 0x0C
1730
1731#define PCIC_MBAR0 0x10
1732
1733#define PCIC_MBAR0_OFFSET 0
1734
1735#define PCIC_MBAR1 0x18
1736
1737#define PCIC_MBAR1_OFFSET 2
1738
1739#define PCIC_IOBAR 0x20
1740
1741#define PCIC_IOBAR_OFFSET 4
1742
1743#define PCIC_SUBVENDOR_ID 0x2C
1744
1745#define PCIC_SUBSYTEM_ID 0x2E
1746
1747#define PCIX_STATUS 0x44
1748#define RCV_SCE 0x20000000
1749#define UNEXP_SC 0x00080000
1750#define SC_DISCARD 0x00040000
1751
1752#define ECC_CTRL_STAT 0x48
1753#define UNCOR_ECCERR 0x00000008
1754
1755#define PCIC_PM_CSR 0x5C
1756
1757#define PWR_STATE_D0 0
1758#define PWR_STATE_D1 1 /* not supported */
1759#define PWR_STATE_D2 2 /* not supported */
1760#define PWR_STATE_D3 3
1761
1762#define PCIC_BASE1 0x6C /* internal use only */
1763
1764#define BASE1_RSVD 0xFFFFFFF8
1765
1766#define PCIC_BASEA 0x70 /* internal use only */
1767
1768#define BASEA_RSVD 0xFFFFFFC0
1769#define BASEA_START 0
1770
1771#define PCIC_BASEB 0x74 /* internal use only */
1772
1773#define BASEB_RSVD 0xFFFFFF80
1774#define BASEB_IOMAP_MASK 0x7F
1775#define BASEB_START 0x80
1776
1777#define PCIC_BASEC 0x78 /* internal use only */
1778
1779#define BASEC_RSVD 0xFFFFFFFC
1780#define BASEC_MASK 0x03
1781#define BASEC_START 0x58
1782
1783#define PCIC_MBAR_KEY 0x7C /* internal use only */
1784
1785#define MBAR_KEY_MASK 0xFFFFFFFF
1786
1787#define PCIC_HSTPCIX_CNTRL 0xA0
1788
1789#define REWIND_DIS 0x0800
1790#define SC_TMR_DIS 0x04000000
1791
1792#define PCIC_MBAR0_MASK 0xA8
1793#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000
1794#define PCIC_MBAR0_SIZE_SHIFT 13
1795#define PCIC_MBAR0_SIZE(val) \
1796 (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT)
1797
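/*
 * Illustrative sketch (not from the original header): PCIC_MBAR0_SIZE()
 * extracts bits 28:13 of the PCIC_MBAR0_MASK register value; "mbar0_mask"
 * is a hypothetical variable holding that register's contents.
 */
#if 0
	u32 size_field = PCIC_MBAR0_SIZE(mbar0_mask);
#endif
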
1798#define PCIC_FLASH_MBAR 0xB8
1799
1800#define PCIC_INTRPT_STAT 0xD4
1801
1802#define PCIC_TP_CTRL 0xFC
1803
1804/*
1805 * EXSI Registers, Address Range: (0x00-0xFC)
1806 */
1807#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI
1808
1809#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00)
1810
1811#define OCMINITIALIZED 0x80000000
1812#define ASIEN 0x00400000
1813#define HCMODE 0x00200000
1814#define PCIDEF 0x00100000
1815#define COMSTOCK 0x00080000
1816#define SEEPROMEND 0x00040000
1817#define MSTTIMEN 0x00020000
1818#define XREGEX 0x00000200
1819#define NVRAMW 0x00000100
1820#define NVRAMEX 0x00000080
1821#define SRAMW 0x00000040
1822#define SRAMEX 0x00000020
1823#define FLASHW 0x00000010
1824#define FLASHEX 0x00000008
1825#define SEEPROMCFG 0x00000004
1826#define SEEPROMTYP 0x00000002
1827#define SEEPROMEX 0x00000001
1828
1829
1830#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04)
1831
1832#define MODINT_EN 0x00000001
1833
1834
1835#define PMSTATR (EXSI_REG_BASE_ADR + 0x10)
1836
1837#define FLASHRST 0x00000002
1838#define FLASHRDY 0x00000001
1839
1840
1841#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14)
1842
1843#define FLWEH_MASK 0x30000000
1844#define FLWESU_MASK 0x0C000000
1845#define FLWEPW_MASK 0x03F00000
1846#define FLOEH_MASK 0x000C0000
1847#define FLOESU_MASK 0x00030000
1848#define FLOEPW_MASK 0x0000FC00
1849#define FLCSH_MASK 0x00000300
1850#define FLCSSU_MASK 0x000000C0
1851#define FLCSPW_MASK 0x0000003F
1852
1853#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18)
1854
1855#define SRWEH_MASK 0x30000000
1856#define SRWESU_MASK 0x0C000000
1857#define SRWEPW_MASK 0x03F00000
1858
1859#define SROEH_MASK 0x000C0000
1860#define SROESU_MASK 0x00030000
1861#define SROEPW_MASK 0x0000FC00
1862#define SRCSH_MASK 0x00000300
1863#define SRCSSU_MASK 0x000000C0
1864#define SRCSPW_MASK 0x0000003F
1865
1866#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C)
1867
1868#define NVWEH_MASK 0x30000000
1869#define NVWESU_MASK 0x0C000000
1870#define NVWEPW_MASK 0x03F00000
1871#define NVOEH_MASK 0x000C0000
1872#define NVOESU_MASK 0x00030000
1873#define NVOEPW_MASK 0x0000FC00
1874#define NVCSH_MASK 0x00000300
1875#define NVCSSU_MASK 0x000000C0
1876#define NVCSPW_MASK 0x0000003F
1877
1878#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20)
1879
1880#define XRWEH_MASK 0x30000000
1881#define XRWESU_MASK 0x0C000000
1882#define XRWEPW_MASK 0x03F00000
1883#define XROEH_MASK 0x000C0000
1884#define XROESU_MASK 0x00030000
1885#define XROEPW_MASK 0x0000FC00
1886#define XRCSH_MASK 0x00000300
1887#define XRCSSU_MASK 0x000000C0
1888#define XRCSPW_MASK 0x0000003F
1889
1890#define XREGADDR (EXSI_REG_BASE_ADR + 0x24)
1891
1892#define XRADDRINCEN 0x80000000
1893#define XREGADD_MASK 0x007FFFFF
1894
1895
1896#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28)
1897
1898#define XREGDATA_MASK 0x0000FFFF
1899
1900#define GPIOOER (EXSI_REG_BASE_ADR + 0x40)
1901
1902#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44)
1903
1904#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48)
1905
1906#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C)
1907
1908#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50)
1909
1910#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54)
1911
1912#define GPIO_EXTSRC 0x00000001
1913
1914#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0)
1915
1916#define SXFERDONE 0x00000100
1917#define SXFERCNT_MASK 0x000000E0
1918#define SCMDTYP_MASK 0x0000001C
1919#define SXFERSTART 0x00000002
1920#define SXFEREN 0x00000001
1921
1922#define SRATER (EXSI_REG_BASE_ADR + 0xA4)
1923
1924#define SADDRR (EXSI_REG_BASE_ADR + 0xA8)
1925
1926#define SADDR_MASK 0x0000FFFF
1927
1928#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC)
1929
1930#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC)
1931#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD)
1932#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE)
1933#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF)
1934
1935#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0)
1936
1937#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0)
1938#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1)
1939#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2)
1940#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3)
1941
1942#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0)
1943#define ASIFMTERR 0x00000400
1944#define ASISEECHKERR 0x00000200
1945#define ASIERR 0x00000100
1946
1947#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4)
1948#define CHECKSUM_MASK 0x0000FFFF
1949
1950#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8)
1951#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC)
1952#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0)
1953#define CPI2ASIBYTECNT_MASK 0x00070000
1954#define CPI2ASIBYTEEN_MASK 0x0000F000
1955#define CPI2ASITARGERR_MASK 0x00000F00
1956#define CPI2ASITARGMID_MASK 0x000000F0
1957#define CPI2ASIMSTERR_MASK 0x0000000F
1958
1959/*
1960 * XSRAM, External SRAM (DWord and any BE pattern accessible)
1961 */
1962#define XSRAM_REG_BASE_ADDR 0xB8100000
1963#define XSRAM_SIZE 0x100000
1964
1965/*
1966 * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF).
1967 */
1968#define NVRAM_REG_BASE_ADR 0xBF800000
1969#define NVRAM_MAX_BASE_ADR 0x003FFFFF
1970
1971/* OCM base address */
1972#define OCM_BASE_ADDR 0xA0000000
1973#define OCM_MAX_SIZE 0x20000
1974
1975/*
1976 * Sequencers (Central and Link) Scratch RAM page definitions.
1977 */
1978
1979/*
1980 * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024
1981 * byte memory. It is dword accessible and has byte parity
1982 * protection. The CSEQ accesses it in 32 byte windows, either as mode
1983 * dependent or mode independent memory. Each mode has 96 bytes,
1984 * (three 32 byte pages 0-2, not contiguous), leaving 128 bytes of
1985 * Mode Independent memory (four 32 byte pages 3-7). Note that mode
1986 * dependent scratch memory, Mode 8, page 0-3 overlaps mode
1987 * independent scratch memory, pages 0-3.
1988 * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and
1989 * 128 bytes in mode 8,
1990 * - 259 bytes of mode independent scratch, common to modes 0-15.
1991 *
1992 * Sequencer scratch RAM is 1024 bytes. This scratch memory is
1993 * divided into mode dependent and mode independent scratch with this
1994 * memory further subdivided into pages of size 32 bytes. There are 5
1995 * pages (160 bytes) of mode independent scratch and 3 pages of
1996 * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages
1997 * 0-2 dependent scratch overlap with pages 0-2 of mode independent
1998 * scratch memory.
1999 *
2000 * The host accesses this scratch in a different manner from the
2001 * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE
2002 * and CMnSCRPAGE to access the scratch memory. A flat mapping of the
2003 * scratch memory is available for software convenience and to prevent
2004 * corruption while the sequencer is running. This memory is mapped
2005 * onto addresses 800h - BFFh, total of 400h bytes.
2006 *
2007 * These addresses are mapped as follows:
2008 *
2009 * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1
2010 * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1
2011 * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1
2012 * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1
2013 * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1
2014 * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1
2015 * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1
2016 * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1
2017 * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2
2018 * Mode Independent Scratch Pages 0-2
2019 * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3
2020 * Mode Independent Scratch Page 3
2021 * A80h-AFFh Mode Independent Scratch Pages 4-7
2022 * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2
2023 * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2
2024 * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2
2025 * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2
2026 * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2
2027 * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2
2028 * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2
2029 * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2
2030 */
2031
2032/* General macros */
2033#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */
2034
2035/* All macros start with offsets from base + 0x800 (CMAPPEDSCR).
2036 * Mode dependent scratch page 0, mode 0.
2037 * For modes 1-7 you have to do arithmetic. */
2038#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000)
2039#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002)
2040#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004)
2041#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006)
2042#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008)
2043
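/*
 * Illustrative sketch (not from the original header): per the address map
 * above, pages 0-1 of modes 0-7 are laid out back to back, 2*CSEQ_PAGE_SIZE
 * apart, so the "arithmetic" for modes 1-7 is a fixed offset from the
 * mode-0 macros.  For example, the queue link head of mode "m":
 */
#if 0
	u32 q_link_head_m = CSEQ_Q_LINK_HEAD + (m) * 2 * CSEQ_PAGE_SIZE;
#endif
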
2044/* Mode dependent scratch page 0 mode 8 macros. */
2045#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200)
2046#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202)
2047#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204)
2048#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206)
2049#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208)
2050#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A)
2051#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C)
2052#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E)
2053#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210)
2054#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212)
2055#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213)
2056#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A)
2057#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C)
2058#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E)
2059
2060/* Mode dependent scratch page 1 mode 8 macros. */
2061#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220)
2062#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228)
2063
2064/* Mode dependent scratch page 2 mode 8 macros */
2065#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240)
2066#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248)
2067#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250)
2068#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254)
2069
2070/* Mode independent scratch page 4 macros. */
2071#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280)
2072#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282)
2073#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284)
2074#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286)
2075#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288)
2076#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A)
2077#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C)
2078#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E)
2079#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290)
2080#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292)
2081#define CSEQ_REG0 (CMAPPEDSCR + 0x0294)
2082#define CSEQ_REG1 (CMAPPEDSCR + 0x0296)
2083#define CSEQ_REG2 (CMAPPEDSCR + 0x0298)
2084#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C)
2085#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D)
2086#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E)
2087
2088/* Mode independent scratch page 5 macros. */
2089#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0)
2090#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8)
2091#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0)
2092#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2)
2093#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4)
2094#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6)
2095#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7)
2096#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8)
2097
2098/* Mode independent scratch page 6 macros. */
2099#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0)
2100#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2)
2101#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4)
2102#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6)
2103#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7)
2104#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8)
2105#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA)
2106#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0)
2107#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2)
2108#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5)
2109#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6)
2110#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8)
2111#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA)
2112#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC)
2113
2114/* Mode independent scratch page 7 macros. */
2115#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0)
2116#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8)
2117#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0)
2118#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2)
2119#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4)
2120#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6)
2121#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7)
2122#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8)
2123#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA)
2124#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC)
2125
2126/***************************************************************************
2127* Link m Sequencer scratch RAM is 512 bytes.
2128* This scratch memory is divided into mode dependent and mode
2129* independent scratch with this memory further subdivided into
2130* pages of size 32 bytes. There are 4 pages (128 bytes) of
2131* mode independent scratch and 4 pages of dependent scratch
2132* memory for modes 0-2 (384 bytes).
2133*
2134* The host accesses this scratch in a different manner from the
2135* link sequencer. The sequencer has to use LSEQ registers
2136* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat
2137* mapping of the scratch memory is available for software
2138* convenience and to prevent corruption while the sequencer is
2139* running. This memory is mapped onto addresses 800h - 9FFh.
2140*
2141* These addresses are mapped as follows:
2142*
2143* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2
2144* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3
2145* Mode Dependent Scratch Mode 5 Page 0
2146* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2
2147* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3
2148* Mode Dependent Scratch Mode 5 Page 1
2149* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2
2150* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3
2151* Mode Dependent Scratch Mode 5 Page 2
2152* 980h-9DFh Mode Independent Scratch Pages 0-2
2153* 9E0h-9FFh Mode Independent Scratch Page 3
2154* Mode Dependent Scratch Mode 5 Page 3
2155*
2156****************************************************************************/
2157/* General macros */
2158#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */
2159#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */
2160#define LSEQ_MODE5_PAGE0_OFFSET 0x60
2161
2162/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */
2163/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2. */
2164#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000)
2165#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002)
2166#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004)
2167
2168/* Mode flag macros (byte 0) */
2169#define SAS_SAVECTX_OCCURRED 0x80
2170#define SAS_OOBSVC_OCCURRED 0x40
2171#define SAS_OOB_DEVICE_PRESENT 0x20
2172#define SAS_CFGHDR_OCCURRED 0x10
2173#define SAS_RCV_INTS_ARE_DISABLED 0x08
2174#define SAS_OOB_HOT_PLUG_CNCT 0x04
2175#define SAS_AWAIT_OPEN_CONNECTION 0x02
2176#define SAS_CFGCMPLT_OCCURRED 0x01
2177
2178/* Mode flag macros (byte 1) */
2179#define SAS_RLSSCB_OCCURRED 0x80
2180#define SAS_FORCED_HEADER_MISS 0x40
2181
2182#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006)
2183#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008)
2184#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B)
2185#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C)
2186
2187/* Mode dependent scratch page 0 macros for mode 0 (non-common) */
2188/* Absolute offsets */
2189#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E)
2190#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010)
2191#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012)
2192#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014)
2193#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016)
2194#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A)
2195#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B)
2196#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C)
2197#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D)
2198#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E)
2199
2200/* Mode dependent scratch page 0 macros for mode 1 (non-common) */
2201/* Absolute offsets */
2202#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E)
2203#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090)
2204#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092)
2205#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A)
2206#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B)
2207#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C)
2208#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x009D)
2209#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E)
2210
2211/* Mode dependent scratch page 0 macros for mode 2 (non-common) */
2212#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E)
2213#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110)
2214#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112)
2215#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114)
2216#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116)
2217#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A)
2218
2219/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */
2220/* Absolute offsets */
2221#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E)
2222#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F)
2223#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070)
2224#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072)
2225#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073)
2226#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074)
2227#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075)
2228#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076)
2229#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x007A)
2230#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C)
2231#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E)
2232
2233/* Mode dependent scratch page 1, mode 0 and mode 1 */
2234#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020)
2235#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030)
2236#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0)
2237#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0)
2238
2239/* Mode dependent scratch page 1 macros for mode 2 */
2240/* Absolute offsets */
2241#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120)
2242#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124)
2243#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128)
2244
2245/* Mode dependent scratch page 1 macros for mode 4/5 */
2246#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0)
2247#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1)
2248#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4)
2249#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5)
2250#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB)
2251#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0)
2252#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2)
2253#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4)
2254
2255/* Mode dependent scratch page 2 macros for mode 0 */
2256/* Absolute offsets */
2257#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040)
2258#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B)
2259#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C)
2260#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E)
2261#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F)
2262
2263/* Mode dependent scratch page 2 macros for mode 1 */
2264/* Absolute offsets */
2265/* byte 0 bits 1-0 are domain select. */
2266#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0)
2267#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8)
2268#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC)
2269#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4)
2270
2271/* Mode dependent scratch page 2 macros for mode 2 */
2272/* Absolute offsets */
2273#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140)
2274#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144)
2275#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148)
2276#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C)
2277#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \
2278 (LmSCRATCH(LinkNum) + 0x0150)
2279#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154)
2280
2281/* Mode dependent scratch page 2 macros for mode 5 */
2282#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160)
2283#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164)
2284#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168)
2285#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C)
2286
2287/* Mode dependent scratch page 3 macros for modes 0 and 1 */
2288/* None defined */
2289
2290/* Mode dependent scratch page 3 macros for modes 2 and 5 */
2291/* None defined */
2292
2293/* Mode Independent Scratch page 0 macros. */
2294#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180)
2295#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182)
2296#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186)
2297#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187)
2298/*
2299 * Currently only bit 0, SAS_DWSAQD, is used.
2300 */
2301#define SAS_DWSAQD 0x01 /*
2302 * DWSSTATUS: DWSAQD
2303 * bit last read in ISR.
2304 */
2305#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188)
2306/* Connection states (byte 0) */
2307#define SAS_WE_OPENED_CS 0x01
2308#define SAS_DEVICE_OPENED_CS 0x02
2309#define SAS_WE_SENT_DONE_CS 0x04
2310#define SAS_DEVICE_SENT_DONE_CS 0x08
2311#define SAS_WE_SENT_CLOSE_CS 0x10
2312#define SAS_DEVICE_SENT_CLOSE_CS 0x20
2313#define SAS_WE_SENT_BREAK_CS 0x40
2314#define SAS_DEVICE_SENT_BREAK_CS 0x80
2315/* Connection states (byte 1) */
2316#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01
2317#define SAS_AIP_RECEIVED_CS 0x02
2318#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04
2319#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08
2320#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10
2321#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20
2322/* Connection states (byte 2) */
2323#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01
2324#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02
2325#define SAS_DEVICE_SENT_DMAT_CS 0x04
2326#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08
2327#define SAS_CLEARING_AFFILIATION_CS 0x20
2328#define SAS_RXTASK_ACTIVE_CS 0x40
2329#define SAS_TXTASK_ACTIVE_CS 0x80
2330/* Connection states (byte 3) */
2331#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01
2332#define SAS_DWS_TIMER_EXPIRED_CS 0x02
2333#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04
2334#define SAS_PHY_DISABLED_CS 0x08
2335#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10
2336#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20
2337#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40
2338#define SAS_DEVICE_SENT_REG_FIS_CS 0x40
2339#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80
2340#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\
2341 SAS_DWS_TIMER_EXPIRED_CS |\
2342 SAS_LINK_RESET_NOT_COMPLETE_CS|\
2343 SAS_PHY_DISABLED_CS)
2344
2345#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\
2346 SAS_PHY_EVENT_TASK_ACTIVE_CS |\
2347 SAS_DEVICE_SENT_ID_FRAME_CS |\
2348 SAS_DEVICE_SENT_HARD_RESET_CS)
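As a hedged illustration (not part of the patch), byte 3 of the connection state can be tested against the two aggregate masks above; the helper names are invented for the example.

/* conn_state_byte3 is byte 3 of LmSEQ_CONNECTION_STATE as read by the host. */
static inline int sas_phy_is_down(u8 conn_state_byte3)
{
	return (conn_state_byte3 & SAS_PHY_IS_DOWN_FLAGS) != 0;
}

static inline int sas_link_ctl_or_phy_event_active(u8 conn_state_byte3)
{
	return (conn_state_byte3 & SAS_LINK_CTL_PHY_EVENT_FLAGS) != 0;
}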
2349
2350#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C)
2351#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E)
2352#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F)
2353#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192)
2354#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194)
2355#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196)
2356#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198)
2357
2358/* Mode independent scratch page 1 macros. */
2359#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0)
2360#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2)
2361#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4)
2362#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6)
2363#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8)
2364#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9)
2365#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA)
2366#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB)
2367#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC)
2368#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD)
2369#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE)
2370#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8)
2371#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC)
2372#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE)
2373
2374/* Mode independent scratch page 2 macros. */
2375#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0)
2376#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2)
2377#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4)
2378#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6)
2379#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8)
2380#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9)
2381#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA)
2382#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB)
2383#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC)
2384#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD)
2385#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE)
2386#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4)
2387
2388/* Mode independent scratch page 3 macros. */
2389#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0)
2390#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4)
2391#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8)
2392#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC)
2393#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0)
2394#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4)
2395#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8)
2396#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC)
2397
2398#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
new file mode 100644
index 000000000000..64d231712345
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -0,0 +1,785 @@
1/*
2 * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_SAS_H_
28#define _AIC94XX_SAS_H_
29
30#include <scsi/libsas.h>
31
32/* ---------- DDBs ---------- */
33/* DDBs are device descriptor blocks which describe a device in the
34 * domain for which this sequencer can maintain low-level connections
35 * for us. They are 64 bytes each.
36 */
37
38struct asd_ddb_ssp_smp_target_port {
39 u8 conn_type; /* byte 0 */
40#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */
41
42 u8 conn_rate;
43 __be16 init_conn_tag;
44 u8 dest_sas_addr[8]; /* bytes 4-11 */
45
46 __le16 send_queue_head;
47 u8 sq_suspended;
48 u8 ddb_type; /* DDB_TYPE_TARGET */
49#define DDB_TYPE_UNUSED 0xFF
50#define DDB_TYPE_TARGET 0xFE
51#define DDB_TYPE_INITIATOR 0xFD
52#define DDB_TYPE_PM_PORT 0xFC
53
54 __le16 _r_a;
55 __be16 awt_def;
56
57 u8 compat_features; /* byte 20 */
58 u8 pathway_blocked_count;
59 __be16 arb_wait_time;
60 __be32 more_compat_features; /* byte 24 */
61
62 u8 conn_mask;
63 u8 flags; /* concurrent conn:2,2 and open:0(1) */
64#define CONCURRENT_CONN_SUPP 0x04
65#define OPEN_REQUIRED 0x01
66
67 u16 _r_b;
68 __le16 exec_queue_tail;
69 __le16 send_queue_tail;
70 __le16 sister_ddb;
71
72 __le16 _r_c;
73
74 u8 max_concurrent_conn;
75 u8 num_concurrent_conn;
76 u8 num_contexts;
77
78 u8 _r_d;
79
80 __le16 active_task_count;
81
82 u8 _r_e[9];
83
84 u8 itnl_reason; /* I_T nexus loss reason */
85
86 __le16 _r_f;
87
88 __le16 itnl_timeout;
89#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */
90
91 __le32 itnl_timestamp;
92} __attribute__ ((packed));
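Since the comment above states that DDBs are 64 bytes, a compile-time check is one way to pin the packed layout. The helper below is only a sketch with a made-up name, not part of the patch; BUILD_BUG_ON() is the stock kernel macro from <linux/kernel.h>.

static inline void asd_check_ddb_size(void)	/* hypothetical helper */
{
	BUILD_BUG_ON(sizeof(struct asd_ddb_ssp_smp_target_port) != 64);
}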
93
94struct asd_ddb_stp_sata_target_port {
95 u8 conn_type; /* byte 0 */
96 u8 conn_rate;
97 __be16 init_conn_tag;
98 u8 dest_sas_addr[8]; /* bytes 4-11 */
99
100 __le16 send_queue_head;
101 u8 sq_suspended;
102 u8 ddb_type; /* DDB_TYPE_TARGET */
103
104 __le16 _r_a;
105
106 __be16 awt_def;
107 u8 compat_features; /* byte 20 */
108 u8 pathway_blocked_count;
109 __be16 arb_wait_time;
110 __be32 more_compat_features; /* byte 24 */
111
112 u8 conn_mask;
113 u8 flags; /* concurrent conn:2,2 and open:0(1) */
114#define SATA_MULTIPORT 0x80
115#define SUPPORTS_AFFIL 0x40
116#define STP_AFFIL_POL 0x20
117
118 u8 _r_b;
119 u8 flags2; /* STP close policy:0 */
120#define STP_CL_POL_NO_TX 0x00
121#define STP_CL_POL_BTW_CMDS 0x01
122
123 __le16 exec_queue_tail;
124 __le16 send_queue_tail;
125 __le16 sister_ddb;
126 __le16 ata_cmd_scbptr;
127 __le32 sata_tag_alloc_mask;
128 __le16 active_task_count;
129 __le16 _r_c;
130 __le32 sata_sactive;
131 u8 num_sata_tags;
132 u8 sata_status;
133 u8 sata_ending_status;
134 u8 itnl_reason; /* I_T nexus loss reason */
135 __le16 ncq_data_scb_ptr;
136 __le16 itnl_timeout;
137 __le32 itnl_timestamp;
138} __attribute__ ((packed));
139
140/* This struct asd_ddb_init_port describes the device descriptor block
141 * of an initiator port (when the sequencer is operating in target mode).
142 * Bytes [0,11] and [20,27] are from the OPEN address frame.
143 * The sequencer allocates an initiator port DDB entry.
144 */
145struct asd_ddb_init_port {
146 u8 conn_type; /* byte 0 */
147 u8 conn_rate;
148 __be16 init_conn_tag; /* BE */
149 u8 dest_sas_addr[8];
150 __le16 send_queue_head; /* LE, byte 12 */
151 u8 sq_suspended;
152 u8 ddb_type; /* DDB_TYPE_INITIATOR */
153 __le16 _r_a;
154 __be16 awt_def; /* BE */
155 u8 compat_features;
156 u8 pathway_blocked_count;
157 __be16 arb_wait_time; /* BE */
158 __be32 more_compat_features; /* BE */
159 u8 conn_mask;
160 u8 flags; /* == 5 */
161 u16 _r_b;
162 __le16 exec_queue_tail; /* execution queue tail */
163 __le16 send_queue_tail;
164 __le16 sister_ddb;
165 __le16 init_resp_timeout; /* initiator response timeout */
166 __le32 _r_c;
167 __le16 active_tasks; /* active task count */
168 __le16 init_list; /* initiator list link pointer */
169 __le32 _r_d;
170 u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */
171 u8 itnl_reason; /* I_T nexus loss reason */
172 __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */
173 __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */
174 __le32 itnl_timestamp;
175} __attribute__ ((packed));
176
177/* This struct asd_ddb_sata_tag describes a look-up table to be used
178 * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8:
179 * NCQ support. This table is used by the sequencers to find the
180 * corresponding SCB, given a SATA II tag value.
181 */
182struct asd_ddb_sata_tag {
183 __le16 scb_pointer[32];
184} __attribute__ ((packed));
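A hedged sketch (not part of the patch) of how this table might be consulted; the helper name is invented and the tag is assumed to be an NCQ tag in the range 0-31.

static inline __le16 asd_sata_tag_to_scb_ptr(const struct asd_ddb_sata_tag *tbl,
					     u8 ncq_tag)
{
	return tbl->scb_pointer[ncq_tag & 0x1F];	/* 32 entries, tags 0-31 */
}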
185
186/* This struct asd_ddb_sata_pm_table describes a port number to
187 * connection handle look-up table. SATA targets attached to a port
188 * multiplier require a 4-bit port number value. There is one DDB
189 * entry of this type for each SATA port multiplier (sister DDB).
190 * Given a SATA PM port number, this table gives us the SATA PM Port
191 * DDB of the SATA port multiplier port (i.e. the SATA target
192 * discovered on the port).
193 */
194struct asd_ddb_sata_pm_table {
195 __le16 ddb_pointer[16];
196 __le16 _r_a[16];
197} __attribute__ ((packed));
198
199/* This struct asd_ddb_sata_pm_port describes the SATA port multiplier
200 * port format DDB.
201 */
202struct asd_ddb_sata_pm_port {
203 u8 _r_a[15];
204 u8 ddb_type;
205 u8 _r_b[13];
206 u8 pm_port_flags;
207#define PM_PORT_MASK 0xF0
208#define PM_PORT_SET 0x02
209 u8 _r_c[6];
210 __le16 sister_ddb;
211 __le16 ata_cmd_scbptr;
212 __le32 sata_tag_alloc_mask;
213 __le16 active_task_count;
214 __le16 parent_ddb;
215 __le32 sata_sactive;
216 u8 num_sata_tags;
217 u8 sata_status;
218 u8 sata_ending_status;
219 u8 _r_d[9];
220} __attribute__ ((packed));
221
222/* This struct asd_ddb_seq_shared describes a DDB shared by the
223 * central and link sequencers. port_map_by_links is indexed by phy
224 * number [0,7]; each byte is a bit mask of all the phys that are in
225 * the same port as the indexed phy.
226 */
227struct asd_ddb_seq_shared {
228 __le16 q_free_ddb_head;
229 __le16 q_free_ddb_tail;
230 __le16 q_free_ddb_cnt;
231 __le16 q_used_ddb_head;
232 __le16 q_used_ddb_tail;
233 __le16 shared_mem_lock;
234 __le16 smp_conn_tag;
235 __le16 est_nexus_buf_cnt;
236 __le16 est_nexus_buf_thresh;
237 u32 _r_a;
238 u8 settable_max_contexts;
239 u8 _r_b[23];
240 u8 conn_not_active;
241 u8 phy_is_up;
242 u8 _r_c[8];
243 u8 port_map_by_links[8];
244} __attribute__ ((packed));
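As an illustrative sketch (not part of the patch), port_map_by_links can answer whether two phys currently belong to the same port; the helper name is hypothetical.

static inline int asd_phys_share_port(const struct asd_ddb_seq_shared *shared,
				      int phy_a, int phy_b)
{
	/* Each byte is a mask of all phys in the same port as the indexed phy. */
	return (shared->port_map_by_links[phy_a] & (1 << phy_b)) != 0;
}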
245
246/* ---------- SG Element ---------- */
247
248/* This struct sg_el describes the hardware scatter gather buffer
249 * element. All entries are little endian. An SCB embeds 2 of
250 * these, plus one more, called a link element, which points to a
251 * sublist if needed.
252 *
253 * A link element has only the bus address set and the flags (DS) bit
254 * valid. The bus address points to the start of the sublist.
255 *
256 * If a sublist is needed, then that sublist should also include the 2
257 * sg_el embedded in the SCB, in which case next_sg_offset is 32,
258 * since sizeof(sg_el) = 16; EOS should be 1 and EOL 0 in this case.
259 */
260struct sg_el {
261 __le64 bus_addr;
262 __le32 size;
263 __le16 _r;
264 u8 next_sg_offs;
265 u8 flags;
266#define ASD_SG_EL_DS_MASK 0x30
267#define ASD_SG_EL_DS_OCM 0x10
268#define ASD_SG_EL_DS_HM 0x00
269#define ASD_SG_EL_LIST_MASK 0xC0
270#define ASD_SG_EL_LIST_EOL 0x40
271#define ASD_SG_EL_LIST_EOS 0x80
272} __attribute__ ((packed));
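To illustrate the link element described above, the sketch below fills one that points at a sublist residing in host memory. It is not part of the patch; the helper name is invented and 'sublist_dma' stands for the bus address obtained when the sublist was DMA-mapped.

#include <linux/string.h>	/* memset() */
#include <linux/dma-mapping.h>	/* dma_addr_t */
#include <asm/byteorder.h>	/* cpu_to_le64() */

static inline void asd_fill_link_element(struct sg_el *link, dma_addr_t sublist_dma)
{
	memset(link, 0, sizeof(*link));
	link->bus_addr = cpu_to_le64(sublist_dma);	/* start of the sublist */
	link->flags = ASD_SG_EL_DS_HM;			/* sublist is in host memory */
}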
273
274/* ---------- SCBs ---------- */
275
276/* An SCB (sequencer control block) is comprised of a common header
277 * and a task part, for a total of 128 bytes. All fields are in LE
278 * order, unless otherwise noted.
279 */
280
281/* This struct scb_header defines the SCB header format.
282 */
283struct scb_header {
284 __le64 next_scb;
285 __le16 index; /* transaction context */
286 u8 opcode;
287} __attribute__ ((packed));
288
289/* SCB opcodes: Execution queue
290 */
291#define INITIATE_SSP_TASK 0x00
292#define INITIATE_LONG_SSP_TASK 0x01
293#define INITIATE_BIDIR_SSP_TASK 0x02
294#define ABORT_TASK 0x03
295#define INITIATE_SSP_TMF 0x04
296#define SSP_TARG_GET_DATA 0x05
297#define SSP_TARG_GET_DATA_GOOD 0x06
298#define SSP_TARG_SEND_RESP 0x07
299#define QUERY_SSP_TASK 0x08
300#define INITIATE_ATA_TASK 0x09
301#define INITIATE_ATAPI_TASK 0x0a
302#define CONTROL_ATA_DEV 0x0b
303#define INITIATE_SMP_TASK 0x0c
304#define SMP_TARG_SEND_RESP 0x0f
305
306/* SCB opcodes: Send Queue
307 */
308#define SSP_TARG_SEND_DATA 0x40
309#define SSP_TARG_SEND_DATA_GOOD 0x41
310
311/* SCB opcodes: Link Queue
312 */
313#define CONTROL_PHY 0x80
314#define SEND_PRIMITIVE 0x81
315#define INITIATE_LINK_ADM_TASK 0x82
316
317/* SCB opcodes: other
318 */
319#define EMPTY_SCB 0xc0
320#define INITIATE_SEQ_ADM_TASK 0xc1
321#define EST_ICL_TARG_WINDOW 0xc2
322#define COPY_MEM 0xc3
323#define CLEAR_NEXUS 0xc4
324#define INITIATE_DDB_ADM_TASK 0xc6
325#define ESTABLISH_NEXUS_ESCB 0xd0
326
327#define LUN_SIZE 8
328
329/* See SAS spec, task IU
330 */
331struct ssp_task_iu {
332 u8 lun[LUN_SIZE]; /* BE */
333 u16 _r_a;
334 u8 tmf;
335 u8 _r_b;
336 __be16 tag; /* BE */
337 u8 _r_c[14];
338} __attribute__ ((packed));
339
340/* See SAS spec, command IU
341 */
342struct ssp_command_iu {
343 u8 lun[LUN_SIZE];
344 u8 _r_a;
345 u8 efb_prio_attr; /* enable first burst, task prio & attr */
346#define EFB_MASK 0x80
347#define TASK_PRIO_MASK 0x78
348#define TASK_ATTR_MASK 0x07
349
350 u8 _r_b;
351 u8 add_cdb_len; /* in dwords, since bit 0,1 are reserved */
352 union {
353 u8 cdb[16];
354 struct {
355 __le64 long_cdb_addr; /* bus address, LE */
356 __le32 long_cdb_size; /* LE */
357 u8 _r_c[3];
358 u8 eol_ds; /* eol:6,6, ds:5,4 */
359 } long_cdb; /* sequencer extension */
360 };
361} __attribute__ ((packed));
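A hedged example of packing the enable-first-burst bit, task priority and task attribute using the masks above; the helper name and calling convention are assumptions made for the illustration.

static inline u8 asd_pack_efb_prio_attr(int enable_first_burst, u8 prio, u8 attr)
{
	u8 v = 0;

	if (enable_first_burst)
		v |= EFB_MASK;			/* bit 7 */
	v |= (prio << 3) & TASK_PRIO_MASK;	/* bits 6:3 */
	v |= attr & TASK_ATTR_MASK;		/* bits 2:0 */
	return v;
}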
362
363struct xfer_rdy_iu {
364 __be32 requested_offset; /* BE */
365 __be32 write_data_len; /* BE */
366 __be32 _r_a;
367} __attribute__ ((packed));
368
369/* ---------- SCB tasks ---------- */
370
371/* This is both ssp_task and long_ssp_task
372 */
373struct initiate_ssp_task {
374 u8 proto_conn_rate; /* proto:6,4, conn_rate:3,0 */
375 __le32 total_xfer_len;
376 struct ssp_frame_hdr ssp_frame;
377 struct ssp_command_iu ssp_cmd;
378 __le16 sister_scb; /* 0xFFFF */
379 __le16 conn_handle; /* index to DDB for the intended target */
380 u8 data_dir; /* :1,0 */
381#define DATA_DIR_NONE 0x00
382#define DATA_DIR_IN 0x01
383#define DATA_DIR_OUT 0x02
384#define DATA_DIR_BYRECIPIENT 0x03
385
386 u8 _r_a;
387 u8 retry_count;
388 u8 _r_b[5];
389 struct sg_el sg_element[3]; /* 2 real and 1 link */
390} __attribute__ ((packed));
391
392/* This defines both ata_task and atapi_task.
393 * ata: C bit of FIS should be 1,
394 * atapi: C bit of FIS should be 1, and command register should be 0xA0,
395 * to indicate a packet command.
396 */
397struct initiate_ata_task {
398 u8 proto_conn_rate;
399 __le32 total_xfer_len;
400 struct host_to_dev_fis fis;
401 __le32 data_offs;
402 u8 atapi_packet[16];
403 u8 _r_a[12];
404 __le16 sister_scb;
405 __le16 conn_handle;
406 u8 ata_flags; /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */
407#define CSMI_TASK 0x40
408#define DATA_XFER_MODE_DMA 0x10
409#define ATA_Q_TYPE_MASK 0x08
410#define ATA_Q_TYPE_UNTAGGED 0x00
411#define ATA_Q_TYPE_NCQ 0x08
412
413 u8 _r_b;
414 u8 retry_count;
415 u8 _r_c;
416 u8 flags;
417#define STP_AFFIL_POLICY 0x20
418#define SET_AFFIL_POLICY 0x10
419#define RET_PARTIAL_SGLIST 0x02
420
421 u8 _r_d[3];
422 struct sg_el sg_element[3];
423} __attribute__ ((packed));
424
425struct initiate_smp_task {
426 u8 proto_conn_rate;
427 u8 _r_a[40];
428 struct sg_el smp_req;
429 __le16 sister_scb;
430 __le16 conn_handle;
431 u8 _r_c[8];
432 struct sg_el smp_resp;
433 u8 _r_d[32];
434} __attribute__ ((packed));
435
436struct control_phy {
437 u8 phy_id;
438 u8 sub_func;
439#define DISABLE_PHY 0x00
440#define ENABLE_PHY 0x01
441#define RELEASE_SPINUP_HOLD 0x02
442#define ENABLE_PHY_NO_SAS_OOB 0x03
443#define ENABLE_PHY_NO_SATA_OOB 0x04
444#define PHY_NO_OP 0x05
445#define EXECUTE_HARD_RESET 0x81
446
447 u8 func_mask;
448 u8 speed_mask;
449 u8 hot_plug_delay;
450 u8 port_type;
451 u8 flags;
452#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01
453#define DISABLE_PHY_IF_OOB_FAILS 0x02
454
455 __le32 timeout_override;
456 u8 link_reset_retries;
457 u8 _r_a[47];
458 __le16 conn_handle;
459 u8 _r_b[56];
460} __attribute__ ((packed));
461
462struct control_ata_dev {
463 u8 proto_conn_rate;
464 __le32 _r_a;
465 struct host_to_dev_fis fis;
466 u8 _r_b[32];
467 __le16 sister_scb;
468 __le16 conn_handle;
469 u8 ata_flags; /* 0 */
470 u8 _r_c[55];
471} __attribute__ ((packed));
472
473struct empty_scb {
474 u8 num_valid;
475 __le32 _r_a;
476#define ASD_EDBS_PER_SCB 7
477/* header+data+CRC+DMA suffix data */
478#define ASD_EDB_SIZE (24+1024+4+16)
479 struct sg_el eb[ASD_EDBS_PER_SCB];
480#define ELEMENT_NOT_VALID 0xC0
481} __attribute__ ((packed));
482
483struct initiate_link_adm {
484 u8 phy_id;
485 u8 sub_func;
486#define GET_LINK_ERROR_COUNT 0x00
487#define RESET_LINK_ERROR_COUNT 0x01
488#define ENABLE_NOTIFY_SPINUP_INTS 0x02
489
490 u8 _r_a[57];
491 __le16 conn_handle;
492 u8 _r_b[56];
493} __attribute__ ((packed));
494
495struct copy_memory {
496 u8 _r_a;
497 __le16 xfer_len;
498 __le16 _r_b;
499 __le64 src_busaddr;
500 u8 src_ds; /* See definition of sg_el */
501 u8 _r_c[45];
502 __le16 conn_handle;
503 __le64 _r_d;
504 __le64 dest_busaddr;
505 u8 dest_ds; /* See definition of sg_el */
506 u8 _r_e[39];
507} __attribute__ ((packed));
508
509struct abort_task {
510 u8 proto_conn_rate;
511 __le32 _r_a;
512 struct ssp_frame_hdr ssp_frame;
513 struct ssp_task_iu ssp_task;
514 __le16 sister_scb;
515 __le16 conn_handle;
516 u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */
517#define SUSPEND_DATA_TRANS 0x04
518
519 u8 _r_b;
520 u8 retry_count;
521 u8 _r_c[5];
522 __le16 index; /* Transaction context of task to be queried */
523 __le16 itnl_to;
524 u8 _r_d[44];
525} __attribute__ ((packed));
526
527struct clear_nexus {
528 u8 nexus;
529#define NEXUS_ADAPTER 0x00
530#define NEXUS_PORT 0x01
531#define NEXUS_I_T 0x02
532#define NEXUS_I_T_L 0x03
533#define NEXUS_TAG 0x04
534#define NEXUS_TRANS_CX 0x05
535#define NEXUS_SATA_TAG 0x06
536#define NEXUS_T_L 0x07
537#define NEXUS_L 0x08
538#define NEXUS_T_TAG 0x09
539
540 __le32 _r_a;
541 u8 flags;
542#define SUSPEND_TX 0x80
543#define RESUME_TX 0x40
544#define SEND_Q 0x04
545#define EXEC_Q 0x02
546#define NOTINQ 0x01
547
548 u8 _r_b[3];
549 u8 conn_mask;
550 u8 _r_c[19];
551 struct ssp_task_iu ssp_task; /* LUN and TAG */
552 __le16 _r_d;
553 __le16 conn_handle;
554 __le64 _r_e;
555 __le16 index; /* Transaction context of task to be cleared */
556 __le16 context; /* Clear nexus context */
557 u8 _r_f[44];
558} __attribute__ ((packed));
559
560struct initiate_ssp_tmf {
561 u8 proto_conn_rate;
562 __le32 _r_a;
563 struct ssp_frame_hdr ssp_frame;
564 struct ssp_task_iu ssp_task;
565 __le16 sister_scb;
566 __le16 conn_handle;
567 u8 flags; /* itnl override and suspend data tx */
568#define OVERRIDE_ITNL_TIMER 8
569
570 u8 _r_b;
571 u8 retry_count;
572 u8 _r_c[5];
573 __le16 index; /* Transaction context of task to be queried */
574 __le16 itnl_to;
575 u8 _r_d[44];
576} __attribute__ ((packed));
577
578/* Transmits an arbitrary primitive on the link.
579 * Used for NOTIFY and BROADCAST.
580 */
581struct send_prim {
582 u8 phy_id;
583 u8 wait_transmit; /* :0,0 */
584 u8 xmit_flags;
585#define XMTPSIZE_MASK 0xF0
586#define XMTPSIZE_SINGLE 0x10
587#define XMTPSIZE_REPEATED 0x20
588#define XMTPSIZE_CONT 0x20
589#define XMTPSIZE_TRIPLE 0x30
590#define XMTPSIZE_REDUNDANT 0x60
591#define XMTPSIZE_INF 0
592
593#define XMTCONTEN 0x04
594#define XMTPFRM 0x02 /* Transmit at the next frame boundary */
595#define XMTPIMM 0x01 /* Transmit immediately */
596
597 __le16 _r_a;
598 u8 prim[4]; /* K, D0, D1, D2 */
599 u8 _r_b[50];
600 __le16 conn_handle;
601 u8 _r_c[56];
602} __attribute__ ((packed));
603
604/* This describes both SSP Target Get Data and SSP Target Get Data And
605 * Send Good Response SCBs. Used when the sequencer is operating in
606 * target mode...
607 */
608struct ssp_targ_get_data {
609 u8 proto_conn_rate;
610 __le32 total_xfer_len;
611 struct ssp_frame_hdr ssp_frame;
612 struct xfer_rdy_iu xfer_rdy;
613 u8 lun[LUN_SIZE];
614 __le64 _r_a;
615 __le16 sister_scb;
616 __le16 conn_handle;
617 u8 data_dir; /* 01b */
618 u8 _r_b;
619 u8 retry_count;
620 u8 _r_c[5];
621 struct sg_el sg_element[3];
622} __attribute__ ((packed));
623
624/* ---------- The actual SCB struct ---------- */
625
626struct scb {
627 struct scb_header header;
628 union {
629 struct initiate_ssp_task ssp_task;
630 struct initiate_ata_task ata_task;
631 struct initiate_smp_task smp_task;
632 struct control_phy control_phy;
633 struct control_ata_dev control_ata_dev;
634 struct empty_scb escb;
635 struct initiate_link_adm link_adm;
636 struct copy_memory cp_mem;
637 struct abort_task abort_task;
638 struct clear_nexus clear_nexus;
639 struct initiate_ssp_tmf ssp_tmf;
640 };
641} __attribute__ ((packed));
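Given the statement above that an SCB totals 128 bytes, a compile-time check (sketch only, not part of the patch; the helper name is made up) could guard the size of the header plus task union.

static inline void asd_check_scb_size(void)	/* hypothetical helper */
{
	BUILD_BUG_ON(sizeof(struct scb) != 128);
}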
642
643/* ---------- Done List ---------- */
644/* The done list entry opcode field is defined below.
645 * The mnemonic encoding and meaning is as follows:
646 * TC - Task Complete, status was received and acknowledged
647 * TF - Task Failed, indicates an error prior to receiving acknowledgment
648 * for the command:
649 * - no conn,
650 * - NACK or R_ERR received in response to this command,
651 * - credit blocked or not available, or in the case of SMP request,
652 * - no SMP response was received.
653 * In these four cases it is known that the target didn't receive the
654 * command.
655 * TI - Task Interrupted, error after the command was acknowledged. It is
656 * known that the command was received by the target.
657 * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK
658 * (R_ERR) was received due to loss of signal, broken connection, loss of
659 * dword sync or other reason. The application client should send the
660 * appropriate task query.
661 * TA - Task Aborted, see TF.
662 * _RESP - The completion includes an empty buffer containing status.
663 * TO - Timeout.
664 */
665#define TC_NO_ERROR 0x00
666#define TC_UNDERRUN 0x01
667#define TC_OVERRUN 0x02
668#define TF_OPEN_TO 0x03
669#define TF_OPEN_REJECT 0x04
670#define TI_BREAK 0x05
671#define TI_PROTO_ERR 0x06
672#define TC_SSP_RESP 0x07
673#define TI_PHY_DOWN 0x08
674#define TF_PHY_DOWN 0x09
675#define TC_LINK_ADM_RESP 0x0a
676#define TC_CSMI 0x0b
677#define TC_ATA_RESP 0x0c
678#define TU_PHY_DOWN 0x0d
679#define TU_BREAK 0x0e
680#define TI_SATA_TO 0x0f
681#define TI_NAK 0x10
682#define TC_CONTROL_PHY 0x11
683#define TF_BREAK 0x12
684#define TC_RESUME 0x13
685#define TI_ACK_NAK_TO 0x14
686#define TF_SMPRSP_TO 0x15
687#define TF_SMP_XMIT_RCV_ERR 0x16
688#define TC_PARTIAL_SG_LIST 0x17
689#define TU_ACK_NAK_TO 0x18
690#define TU_SATA_TO 0x19
691#define TF_NAK_RECV 0x1a
692#define TA_I_T_NEXUS_LOSS 0x1b
693#define TC_ATA_R_ERR_RECV 0x1c
694#define TF_TMF_NO_CTX 0x1d
695#define TA_ON_REQ 0x1e
696#define TF_TMF_NO_TAG 0x1f
697#define TF_TMF_TAG_FREE 0x20
698#define TF_TMF_TASK_DONE 0x21
699#define TF_TMF_NO_CONN_HANDLE 0x22
700#define TC_TASK_CLEARED 0x23
701#define TI_SYNCS_RECV 0x24
702#define TU_SYNCS_RECV 0x25
703#define TF_IRTT_TO 0x26
704#define TF_NO_SMP_CONN 0x27
705#define TF_IU_SHORT 0x28
706#define TF_DATA_OFFS_ERR 0x29
707#define TF_INV_CONN_HANDLE 0x2a
708#define TF_REQUESTED_N_PENDING 0x2b
709
710/* 0xc1 - 0xc7: empty buffer received,
711 0xd1 - 0xd7: establish nexus empty buffer received
712*/
713/* This is the ESCB mask */
714#define ESCB_RECVD 0xC0
715
716
717/* This struct done_list_struct defines the done list entry.
718 * All fields are LE.
719 */
720struct done_list_struct {
721 __le16 index; /* aka transaction context */
722 u8 opcode;
723 u8 status_block[4];
724 u8 toggle; /* bit 0 */
725#define DL_TOGGLE_MASK 0x01
726} __attribute__ ((packed));
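The toggle bit is what lets the host distinguish freshly written done-list entries from stale ones. The loop below is only a sketch of such a consumer; the ring size and the 'dl_next'/'dl_toggle' bookkeeping are hypothetical, and the driver's actual handling may differ.

static void consume_done_list(struct done_list_struct *dl_ring, int ring_size,
			      int *dl_next, int *dl_toggle)
{
	struct done_list_struct *dl = &dl_ring[*dl_next];

	while ((dl->toggle & DL_TOGGLE_MASK) == *dl_toggle) {
		/* handle dl->index, dl->opcode and dl->status_block here */

		if (++(*dl_next) == ring_size) {
			*dl_next = 0;
			*dl_toggle ^= DL_TOGGLE_MASK;	/* expected value flips on wrap */
		}
		dl = &dl_ring[*dl_next];
	}
}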
727
728/* ---------- PHYS ---------- */
729
730struct asd_phy {
731 struct asd_sas_phy sas_phy;
732 struct asd_phy_desc *phy_desc; /* hw profile */
733
734 struct sas_identify_frame *identify_frame;
735 struct asd_dma_tok *id_frm_tok;
736
737 u8 frame_rcvd[ASD_EDB_SIZE];
738};
739
740
741#define ASD_SCB_SIZE sizeof(struct scb)
742#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port)
743
744/* Define this to 0 if you do not want NOTIFY (ENABLE SPINUP) sent.
745 * Default: 0x10 (it's a mask)
746 */
747#define ASD_NOTIFY_ENABLE_SPINUP 0x10
748
749/* If enabled, set this to the interval between transmission
750 * of NOTIFY (ENABLE SPINUP). In units of 200 us.
751 */
752#define ASD_NOTIFY_TIMEOUT 2500
753
754/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP).
755 * If 0, transmit immediately. In milliseconds.
756 */
757#define ASD_NOTIFY_DOWN_COUNT 0
758
759/* Device present timer timeout constant, 10 ms. */
760#define ASD_DEV_PRESENT_TIMEOUT 0x2710
761
762#define ASD_SATA_INTERLOCK_TIMEOUT 0
763
764/* How long to wait before shutting down an STP connection, unless
765 * an STP target sent frame(s). 50 usec.
766 * IGNORED by the sequencer (i.e. value 0 always).
767 */
768#define ASD_STP_SHUTDOWN_TIMEOUT 0x0
769
770/* ATA soft reset timer timeout. 5 usec. */
771#define ASD_SRST_ASSERT_TIMEOUT 0x05
772
773/* 31 sec */
774#define ASD_RCV_FIS_TIMEOUT 0x01D905C0
775
776#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8
777
778/* COMINIT timer */
779#define ASD_TEN_MILLISEC_TIMEOUT 0x2710
780#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT
781
782/* 1 sec */
783#define ASD_SMP_RCV_TIMEOUT 0x000F4240
784
785#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
new file mode 100644
index 000000000000..7ee49b51b724
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -0,0 +1,758 @@
1/*
2 * Aic94xx SAS/SATA driver SCB management.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/pci.h>
28
29#include "aic94xx.h"
30#include "aic94xx_reg.h"
31#include "aic94xx_hwi.h"
32#include "aic94xx_seq.h"
33
34#include "aic94xx_dump.h"
35
36/* ---------- EMPTY SCB ---------- */
37
38#define DL_PHY_MASK 7
39#define BYTES_DMAED 0
40#define PRIMITIVE_RECVD 0x08
41#define PHY_EVENT 0x10
42#define LINK_RESET_ERROR 0x18
43#define TIMER_EVENT 0x20
44#define REQ_TASK_ABORT 0xF0
45#define REQ_DEVICE_RESET 0xF1
46#define SIGNAL_NCQ_ERROR 0xF2
47#define CLEAR_NCQ_ERROR 0xF3
48
49#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \
50 | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
51 | CURRENT_OOB_ERROR)
52
53static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode)
54{
55 struct sas_phy *sas_phy = phy->sas_phy.phy;
56
57 switch (oob_mode & 7) {
58 case PHY_SPEED_60:
59 /* FIXME: sas transport class doesn't have this */
60 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
61 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
62 break;
63 case PHY_SPEED_30:
64 phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
65 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
66 break;
67 case PHY_SPEED_15:
68 phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
69 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
70 break;
71 }
72 sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
73 sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
74 sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
75 sas_phy->maximum_linkrate = phy->phy_desc->max_sas_lrate;
76 sas_phy->minimum_linkrate = phy->phy_desc->min_sas_lrate;
77
78 if (oob_mode & SAS_MODE)
79 phy->sas_phy.oob_mode = SAS_OOB_MODE;
80 else if (oob_mode & SATA_MODE)
81 phy->sas_phy.oob_mode = SATA_OOB_MODE;
82}
83
84static inline void asd_phy_event_tasklet(struct asd_ascb *ascb,
85 struct done_list_struct *dl)
86{
87 struct asd_ha_struct *asd_ha = ascb->ha;
88 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
89 int phy_id = dl->status_block[0] & DL_PHY_MASK;
90 struct asd_phy *phy = &asd_ha->phys[phy_id];
91
92 u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS;
93 u8 oob_mode = dl->status_block[2];
94
95 switch (oob_status) {
96 case CURRENT_LOSS_OF_SIGNAL:
97 /* directly attached device was removed */
98 ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
99 asd_turn_led(asd_ha, phy_id, 0);
100 sas_phy_disconnected(&phy->sas_phy);
101 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
102 break;
103 case CURRENT_OOB_DONE:
104 /* hot plugged device */
105 asd_turn_led(asd_ha, phy_id, 1);
106 get_lrate_mode(phy, oob_mode);
107 ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
108 phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
109 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
110 break;
111 case CURRENT_SPINUP_HOLD:
112 /* hot plug SATA, no COMWAKE sent */
113 asd_turn_led(asd_ha, phy_id, 1);
114 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
115 break;
116 case CURRENT_GTO_TIMEOUT:
117 case CURRENT_OOB_ERROR:
118 ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id,
119 dl->status_block[1]);
120 asd_turn_led(asd_ha, phy_id, 0);
121 sas_phy_disconnected(&phy->sas_phy);
122 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
123 break;
124 }
125}
126
127/* If phys are enabled sparsely, this will do the right thing. */
128static inline unsigned ord_phy(struct asd_ha_struct *asd_ha,
129 struct asd_phy *phy)
130{
131 u8 enabled_mask = asd_ha->hw_prof.enabled_phys;
132 int i, k = 0;
133
134 for_each_phy(enabled_mask, enabled_mask, i) {
135 if (&asd_ha->phys[i] == phy)
136 return k;
137 k++;
138 }
139 return 0;
140}
141
142/**
143 * asd_get_attached_sas_addr -- extract/generate attached SAS address
144 * @phy: pointer to asd_phy
145 * @sas_addr: pointer to buffer where the SAS address is to be written
146 *
147 * This function extracts the SAS address from an IDENTIFY frame
148 * received. If OOB is SATA, then a SAS address is generated from the
149 * HA tables.
150 *
151 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
152 * buffer.
153 */
154static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr)
155{
156 if (phy->sas_phy.frame_rcvd[0] == 0x34
157 && phy->sas_phy.oob_mode == SATA_OOB_MODE) {
158 struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha;
159 /* FIS device-to-host */
160 u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr);
161
162 addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy);
163 *(__be64 *)sas_addr = cpu_to_be64(addr);
164 } else {
165 struct sas_identify_frame *idframe =
166 (void *) phy->sas_phy.frame_rcvd;
167 memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
168 }
169}
170
171static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
172 struct done_list_struct *dl,
173 int edb_id, int phy_id)
174{
175 unsigned long flags;
176 int edb_el = edb_id + ascb->edb_index;
177 struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
178 struct asd_phy *phy = &ascb->ha->phys[phy_id];
179 struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
180 u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
181
182 size = min(size, (u16) sizeof(phy->frame_rcvd));
183
184 spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
185 memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size);
186 phy->sas_phy.frame_rcvd_size = size;
187 asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
188 spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
189 asd_dump_frame_rcvd(phy, dl);
190 sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
191}
192
193static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
194 struct done_list_struct *dl,
195 int phy_id)
196{
197 struct asd_ha_struct *asd_ha = ascb->ha;
198 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
199 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
200 u8 lr_error = dl->status_block[1];
201 u8 retries_left = dl->status_block[2];
202
203 switch (lr_error) {
204 case 0:
205 ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id);
206 break;
207 case 1:
208 ASD_DPRINTK("phy%d: Loss of signal\n", phy_id);
209 break;
210 case 2:
211 ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id);
212 break;
213 case 3:
214 ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id);
215 break;
216 default:
217 ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n",
218 phy_id, lr_error);
219 break;
220 }
221
222 asd_turn_led(asd_ha, phy_id, 0);
223 sas_phy_disconnected(sas_phy);
224 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
225
226 if (retries_left == 0) {
227 int num = 1;
228 struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
229 GFP_ATOMIC);
230 if (!cp) {
231 asd_printk("%s: out of memory\n", __FUNCTION__);
232 goto out;
233 }
234 ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
235 phy_id);
236 asd_build_control_phy(cp, phy_id, ENABLE_PHY);
237 if (asd_post_ascb_list(ascb->ha, cp, 1) != 0)
238 asd_ascb_free(cp);
239 }
240out:
241 ;
242}
243
244static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
245 struct done_list_struct *dl,
246 int phy_id)
247{
248 unsigned long flags;
249 struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha;
250 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
251 u8 reg = dl->status_block[1];
252 u32 cont = dl->status_block[2] << ((reg & 3)*8);
253
254 reg &= ~3;
255 switch (reg) {
256 case LmPRMSTAT0BYTE0:
257 switch (cont) {
258 case LmBROADCH:
259 case LmBROADRVCH0:
260 case LmBROADRVCH1:
261 case LmBROADSES:
262 ASD_DPRINTK("phy%d: BROADCAST change received:%d\n",
263 phy_id, cont);
264 spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
265 sas_phy->sas_prim = ffs(cont);
266 spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
267 sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
268 break;
269
270 case LmUNKNOWNP:
271 ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id);
272 break;
273
274 default:
275 ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
276 phy_id, reg, cont);
277 break;
278 }
279 break;
280 case LmPRMSTAT1BYTE0:
281 switch (cont) {
282 case LmHARDRST:
283 ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n",
284 phy_id);
285 /* The sequencer disables all phys on that port.
286 * We have to re-enable the phys ourselves. */
287 sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
288 break;
289
290 default:
291 ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n",
292 phy_id, reg, cont);
293 break;
294 }
295 break;
296 default:
297 ASD_DPRINTK("unknown primitive register:0x%x\n",
298 dl->status_block[1]);
299 break;
300 }
301}
302
303/**
304 * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB
305 * @ascb: pointer to Empty SCB
306 * @edb_id: index [0,6] to the empty data buffer which is to be invalidated
307 *
308 * After an EDB has been invalidated, if all EDBs in this ESCB have been
309 * invalidated, the ESCB is posted back to the sequencer.
310 * Context is tasklet/IRQ.
311 */
312void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
313{
314 struct asd_seq_data *seq = &ascb->ha->seq;
315 struct empty_scb *escb = &ascb->scb->escb;
316 struct sg_el *eb = &escb->eb[edb_id];
317 struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id];
318
319 memset(edb->vaddr, 0, ASD_EDB_SIZE);
320 eb->flags |= ELEMENT_NOT_VALID;
321 escb->num_valid--;
322
323 if (escb->num_valid == 0) {
324 int i;
325 /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, "
326 "dma_handle: 0x%08llx, next: 0x%08llx, "
327 "index:%d, opcode:0x%02x\n",
328 ascb->dma_scb.vaddr,
329 (u64)ascb->dma_scb.dma_handle,
330 le64_to_cpu(ascb->scb->header.next_scb),
331 le16_to_cpu(ascb->scb->header.index),
332 ascb->scb->header.opcode);
333 */
334 escb->num_valid = ASD_EDBS_PER_SCB;
335 for (i = 0; i < ASD_EDBS_PER_SCB; i++)
336 escb->eb[i].flags = 0;
337 if (!list_empty(&ascb->list))
338 list_del_init(&ascb->list);
339 i = asd_post_escb_list(ascb->ha, ascb, 1);
340 if (i)
341 asd_printk("couldn't post escb, err:%d\n", i);
342 }
343}
344
345static void escb_tasklet_complete(struct asd_ascb *ascb,
346 struct done_list_struct *dl)
347{
348 struct asd_ha_struct *asd_ha = ascb->ha;
349 struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
350 int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */
351 u8 sb_opcode = dl->status_block[0];
352 int phy_id = sb_opcode & DL_PHY_MASK;
353 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
354
355 if (edb > 6 || edb < 0) {
356 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
357 edb, dl->opcode);
358 ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
359 sb_opcode, phy_id);
360 ASD_DPRINTK("escb: vaddr: 0x%p, "
361 "dma_handle: 0x%llx, next: 0x%llx, "
362 "index:%d, opcode:0x%02x\n",
363 ascb->dma_scb.vaddr,
364 (unsigned long long)ascb->dma_scb.dma_handle,
365 (unsigned long long)
366 le64_to_cpu(ascb->scb->header.next_scb),
367 le16_to_cpu(ascb->scb->header.index),
368 ascb->scb->header.opcode);
369 }
370
371 sb_opcode &= ~DL_PHY_MASK;
372
373 switch (sb_opcode) {
374 case BYTES_DMAED:
375 ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id);
376 asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
377 break;
378 case PRIMITIVE_RECVD:
379 ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__,
380 phy_id);
381 asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
382 break;
383 case PHY_EVENT:
384 ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id);
385 asd_phy_event_tasklet(ascb, dl);
386 break;
387 case LINK_RESET_ERROR:
388 ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__,
389 phy_id);
390 asd_link_reset_err_tasklet(ascb, dl, phy_id);
391 break;
392 case TIMER_EVENT:
393 ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
394 __FUNCTION__, phy_id);
395 asd_turn_led(asd_ha, phy_id, 0);
396 /* the device is gone */
397 sas_phy_disconnected(sas_phy);
398 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
399 break;
400 case REQ_TASK_ABORT:
401 ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
402 phy_id);
403 break;
404 case REQ_DEVICE_RESET:
405 ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
406 phy_id);
407 break;
408 case SIGNAL_NCQ_ERROR:
409 ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
410 phy_id);
411 break;
412 case CLEAR_NCQ_ERROR:
413 ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
414 phy_id);
415 break;
416 default:
417 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
418 phy_id, sb_opcode);
419 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
420 edb, dl->opcode);
421 ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n",
422 sb_opcode, phy_id);
423 ASD_DPRINTK("escb: vaddr: 0x%p, "
424 "dma_handle: 0x%llx, next: 0x%llx, "
425 "index:%d, opcode:0x%02x\n",
426 ascb->dma_scb.vaddr,
427 (unsigned long long)ascb->dma_scb.dma_handle,
428 (unsigned long long)
429 le64_to_cpu(ascb->scb->header.next_scb),
430 le16_to_cpu(ascb->scb->header.index),
431 ascb->scb->header.opcode);
432
433 break;
434 }
435
436 asd_invalidate_edb(ascb, edb);
437}
438
439int asd_init_post_escbs(struct asd_ha_struct *asd_ha)
440{
441 struct asd_seq_data *seq = &asd_ha->seq;
442 int i;
443
444 for (i = 0; i < seq->num_escbs; i++)
445 seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete;
446
447 ASD_DPRINTK("posting %d escbs\n", i);
448 return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs);
449}
450
451/* ---------- CONTROL PHY ---------- */
452
453#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \
454 | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \
455 | CURRENT_OOB_ERROR)
456
457/**
458 * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb
459 * @ascb: pointer to an ascb
460 * @dl: pointer to the done list entry
461 *
462 * This function completes a CONTROL PHY scb and frees the ascb.
463 * A note on LEDs:
464 * - an LED blinks if there is IO through it,
465 * - if a device is connected to the LED, it is lit,
466 * - if no device is connected to the LED, it is dimmed (off).
467 */
468static void control_phy_tasklet_complete(struct asd_ascb *ascb,
469 struct done_list_struct *dl)
470{
471 struct asd_ha_struct *asd_ha = ascb->ha;
472 struct scb *scb = ascb->scb;
473 struct control_phy *control_phy = &scb->control_phy;
474 u8 phy_id = control_phy->phy_id;
475 struct asd_phy *phy = &ascb->ha->phys[phy_id];
476
477 u8 status = dl->status_block[0];
478 u8 oob_status = dl->status_block[1];
479 u8 oob_mode = dl->status_block[2];
480 /* u8 oob_signals= dl->status_block[3]; */
481
482 if (status != 0) {
483 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
484 __FUNCTION__, phy_id, status);
485 goto out;
486 }
487
488 switch (control_phy->sub_func) {
489 case DISABLE_PHY:
490 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
491 asd_turn_led(asd_ha, phy_id, 0);
492 asd_control_led(asd_ha, phy_id, 0);
493 ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id);
494 break;
495
496 case ENABLE_PHY:
497 asd_control_led(asd_ha, phy_id, 1);
498 if (oob_status & CURRENT_OOB_DONE) {
499 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
500 get_lrate_mode(phy, oob_mode);
501 asd_turn_led(asd_ha, phy_id, 1);
502 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
503 __FUNCTION__, phy_id,phy->sas_phy.linkrate,
504 phy->sas_phy.iproto);
505 } else if (oob_status & CURRENT_SPINUP_HOLD) {
506 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
507 asd_turn_led(asd_ha, phy_id, 1);
508 ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__,
509 phy_id);
510 } else if (oob_status & CURRENT_ERR_MASK) {
511 asd_turn_led(asd_ha, phy_id, 0);
512 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
513 __FUNCTION__, phy_id, oob_status);
514 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT
515 | CURRENT_DEVICE_PRESENT)) {
516 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
517 asd_turn_led(asd_ha, phy_id, 1);
518 ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
519 __FUNCTION__, phy_id);
520 } else {
521 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
522 asd_turn_led(asd_ha, phy_id, 0);
523 ASD_DPRINTK("%s: phy%d: no device present: "
524 "oob_status:0x%x\n",
525 __FUNCTION__, phy_id, oob_status);
526 }
527 break;
528 case RELEASE_SPINUP_HOLD:
529 case PHY_NO_OP:
530 case EXECUTE_HARD_RESET:
531 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__,
532 phy_id, control_phy->sub_func);
533 /* XXX finish */
534 break;
535 default:
536 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__,
537 phy_id, control_phy->sub_func);
538 break;
539 }
540out:
541 asd_ascb_free(ascb);
542}
543
544static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
545{
546 /* disable all speeds, then enable defaults */
547 *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS
548 | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS;
549
550 switch (pd->max_sas_lrate) {
551 case SAS_LINK_RATE_6_0_GBPS:
552 *speed_mask &= ~SAS_SPEED_60_DIS;
553 default:
554 case SAS_LINK_RATE_3_0_GBPS:
555 *speed_mask &= ~SAS_SPEED_30_DIS;
556 case SAS_LINK_RATE_1_5_GBPS:
557 *speed_mask &= ~SAS_SPEED_15_DIS;
558 }
559
560 switch (pd->min_sas_lrate) {
561 case SAS_LINK_RATE_6_0_GBPS:
562 *speed_mask |= SAS_SPEED_30_DIS;
563 case SAS_LINK_RATE_3_0_GBPS:
564 *speed_mask |= SAS_SPEED_15_DIS;
565 default:
566 case SAS_LINK_RATE_1_5_GBPS:
567 /* nothing to do */
568 ;
569 }
570
571 switch (pd->max_sata_lrate) {
572 case SAS_LINK_RATE_3_0_GBPS:
573 *speed_mask &= ~SATA_SPEED_30_DIS;
574 default:
575 case SAS_LINK_RATE_1_5_GBPS:
576 *speed_mask &= ~SATA_SPEED_15_DIS;
577 }
578
579 switch (pd->min_sata_lrate) {
580 case SAS_LINK_RATE_3_0_GBPS:
581 *speed_mask |= SATA_SPEED_15_DIS;
582 default:
583 case SAS_LINK_RATE_1_5_GBPS:
584 /* nothing to do */
585 ;
586 }
587}
588
589/**
590 * asd_build_control_phy -- build a CONTROL PHY SCB
591 * @ascb: pointer to an ascb
592 * @phy_id: phy id to control, integer
593 * @subfunc: subfunction, i.e. what to actually do to the phy
594 *
595 * This function builds a CONTROL PHY scb. No allocation of any kind
596 * is performed. @ascb is allocated with the list function.
597 * The caller can override the ascb->tasklet_complete to point
598 * to its own callback function. It must call asd_ascb_free()
599 * in its tasklet complete function.
600 * See the default implementation.
601 */
602void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
603{
604 struct asd_phy *phy = &ascb->ha->phys[phy_id];
605 struct scb *scb = ascb->scb;
606 struct control_phy *control_phy = &scb->control_phy;
607
608 scb->header.opcode = CONTROL_PHY;
609 control_phy->phy_id = (u8) phy_id;
610 control_phy->sub_func = subfunc;
611
612 switch (subfunc) {
613 case EXECUTE_HARD_RESET: /* 0x81 */
614 case ENABLE_PHY: /* 0x01 */
615 /* decide hot plug delay */
616 control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT;
617
618 /* decide speed mask */
619 set_speed_mask(&control_phy->speed_mask, phy->phy_desc);
620
621 /* initiator port settings are in the hi nibble */
622 if (phy->sas_phy.role == PHY_ROLE_INITIATOR)
623 control_phy->port_type = SAS_PROTO_ALL << 4;
624 else if (phy->sas_phy.role == PHY_ROLE_TARGET)
625 control_phy->port_type = SAS_PROTO_ALL;
626 else
627 control_phy->port_type =
628 (SAS_PROTO_ALL << 4) | SAS_PROTO_ALL;
629
630 /* link reset retries, this should be nominal */
631 control_phy->link_reset_retries = 10;
632
633 case RELEASE_SPINUP_HOLD: /* 0x02 */
634 /* decide the func_mask */
635 control_phy->func_mask = FUNCTION_MASK_DEFAULT;
636 if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD)
637 control_phy->func_mask &= ~SPINUP_HOLD_DIS;
638 else
639 control_phy->func_mask |= SPINUP_HOLD_DIS;
640 }
641
642 control_phy->conn_handle = cpu_to_le16(0xFFFF);
643
644 ascb->tasklet_complete = control_phy_tasklet_complete;
645}
646
647/* ---------- INITIATE LINK ADM TASK ---------- */
648
649static void link_adm_tasklet_complete(struct asd_ascb *ascb,
650 struct done_list_struct *dl)
651{
652 u8 opcode = dl->opcode;
653 struct initiate_link_adm *link_adm = &ascb->scb->link_adm;
654 u8 phy_id = link_adm->phy_id;
655
656 if (opcode != TC_NO_ERROR) {
657 asd_printk("phy%d: link adm task 0x%x completed with error "
658 "0x%x\n", phy_id, link_adm->sub_func, opcode);
659 }
660 ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n",
661 phy_id, link_adm->sub_func, opcode);
662
663 asd_ascb_free(ascb);
664}
665
666void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
667 u8 subfunc)
668{
669 struct scb *scb = ascb->scb;
670 struct initiate_link_adm *link_adm = &scb->link_adm;
671
672 scb->header.opcode = INITIATE_LINK_ADM_TASK;
673
674 link_adm->phy_id = phy_id;
675 link_adm->sub_func = subfunc;
676 link_adm->conn_handle = cpu_to_le16(0xFFFF);
677
678 ascb->tasklet_complete = link_adm_tasklet_complete;
679}
680
681/* ---------- SCB timer ---------- */
682
683/**
684 * asd_ascb_timedout -- called when a pending SCB's timer has expired
685 * @data: unsigned long, a pointer to the ascb in question
686 *
687 * This is the default timeout function, which does only what is strictly necessary.
688 * Upper layers can implement their own timeout function, say to free
689 * resources they have with this SCB, and then call this one at the
690 * end of their timeout function. To do this, one should initialize
691 * the ascb->timer.{function, data, expires} prior to calling the post
692 * function. The timer is started by the post function.
693 */
694void asd_ascb_timedout(unsigned long data)
695{
696 struct asd_ascb *ascb = (void *) data;
697 struct asd_seq_data *seq = &ascb->ha->seq;
698 unsigned long flags;
699
700 ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode);
701
702 spin_lock_irqsave(&seq->pend_q_lock, flags);
703 seq->pending--;
704 list_del_init(&ascb->list);
705 spin_unlock_irqrestore(&seq->pend_q_lock, flags);
706
707 asd_ascb_free(ascb);
708}
709
710/* ---------- CONTROL PHY ---------- */
711
712/* Given the spec value, return a driver value. */
713static const int phy_func_table[] = {
714 [PHY_FUNC_NOP] = PHY_NO_OP,
715 [PHY_FUNC_LINK_RESET] = ENABLE_PHY,
716 [PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET,
717 [PHY_FUNC_DISABLE] = DISABLE_PHY,
718 [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD,
719};
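/*
 * Standalone sketch (not driver code): phy_func_table above uses C99
 * designated initializers so the SAS layer's PHY_FUNC_* value indexes
 * straight into the matching firmware sub-function.  The 0x01/0x81
 * opcodes are taken from the comments in asd_build_control_phy();
 * the other two values here are placeholders for illustration only.
 */
#include <stdio.h>

enum ex_func { EX_NOP, EX_LINK_RESET, EX_HARD_RESET, EX_DISABLE };

static const int ex_opcode[] = {
	[EX_NOP]        = 0x00,	/* placeholder */
	[EX_LINK_RESET] = 0x01,	/* ENABLE_PHY in the driver's terms */
	[EX_HARD_RESET] = 0x81,	/* EXECUTE_HARD_RESET */
	[EX_DISABLE]    = 0x80,	/* placeholder */
};

int main(void)
{
	printf("hard reset -> sub_func 0x%x\n", ex_opcode[EX_HARD_RESET]);
	return 0;
}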
720
721int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
722{
723 struct asd_ha_struct *asd_ha = phy->ha->lldd_ha;
724 struct asd_phy_desc *pd = asd_ha->phys[phy->id].phy_desc;
725 struct asd_ascb *ascb;
726 struct sas_phy_linkrates *rates;
727 int res = 1;
728
729 switch (func) {
730 case PHY_FUNC_CLEAR_ERROR_LOG:
731 return -ENOSYS;
732 case PHY_FUNC_SET_LINK_RATE:
733 rates = arg;
734 if (rates->minimum_linkrate) {
735 pd->min_sas_lrate = rates->minimum_linkrate;
736 pd->min_sata_lrate = rates->minimum_linkrate;
737 }
738 if (rates->maximum_linkrate) {
739 pd->max_sas_lrate = rates->maximum_linkrate;
740 pd->max_sata_lrate = rates->maximum_linkrate;
741 }
742 func = PHY_FUNC_LINK_RESET;
743 break;
744 default:
745 break;
746 }
747
748 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
749 if (!ascb)
750 return -ENOMEM;
751
752 asd_build_control_phy(ascb, phy->id, phy_func_table[func]);
753 res = asd_post_ascb_list(asd_ha, ascb , 1);
754 if (res)
755 asd_ascb_free(ascb);
756
757 return res;
758}
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
new file mode 100644
index 000000000000..83574b5b4e69
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -0,0 +1,1089 @@
1/*
2 * Aic94xx SAS/SATA driver access to shared data structures and memory
3 * maps.
4 *
5 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
6 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
7 *
8 * This file is licensed under GPLv2.
9 *
10 * This file is part of the aic94xx driver.
11 *
12 * The aic94xx driver is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; version 2 of the
15 * License.
16 *
17 * The aic94xx driver is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with the aic94xx driver; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
25 *
26 */
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30
31#include "aic94xx.h"
32#include "aic94xx_reg.h"
33
34/* ---------- OCM stuff ---------- */
35
36struct asd_ocm_dir_ent {
37 u8 type;
38 u8 offs[3];
39 u8 _r1;
40 u8 size[3];
41} __attribute__ ((packed));
42
43struct asd_ocm_dir {
44 char sig[2];
45 u8 _r1[2];
46 u8 major; /* 0 */
47 u8 minor; /* 0 */
48 u8 _r2;
49 u8 num_de;
50 struct asd_ocm_dir_ent entry[15];
51} __attribute__ ((packed));
52
53#define OCM_DE_OCM_DIR 0x00
54#define OCM_DE_WIN_DRVR 0x01
55#define OCM_DE_BIOS_CHIM 0x02
56#define OCM_DE_RAID_ENGN 0x03
57#define OCM_DE_BIOS_INTL 0x04
58#define OCM_DE_BIOS_CHIM_OSM 0x05
59#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06
60#define OCM_DE_ADDC2C_RES0 0x07
61#define OCM_DE_ADDC2C_RES1 0x08
62#define OCM_DE_ADDC2C_RES2 0x09
63#define OCM_DE_ADDC2C_RES3 0x0A
64
65#define OCM_INIT_DIR_ENTRIES 5
66/***************************************************************************
67* OCM directory default
68***************************************************************************/
69static struct asd_ocm_dir OCMDirInit =
70{
71 .sig = {0x4D, 0x4F}, /* signature */
72 .num_de = OCM_INIT_DIR_ENTRIES, /* no. of directory entries */
73};
74
75/***************************************************************************
76* OCM directory entries default
77***************************************************************************/
78static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] =
79{
80 {
81 .type = (OCM_DE_ADDC2C_RES0), /* Entry type */
82 .offs = {128}, /* Offset */
83 .size = {0, 4}, /* size */
84 },
85 {
86 .type = (OCM_DE_ADDC2C_RES1), /* Entry type */
87 .offs = {128, 4}, /* Offset */
88 .size = {0, 4}, /* size */
89 },
90 {
91 .type = (OCM_DE_ADDC2C_RES2), /* Entry type */
92 .offs = {128, 8}, /* Offset */
93 .size = {0, 4}, /* size */
94 },
95 {
96 .type = (OCM_DE_ADDC2C_RES3), /* Entry type */
97 .offs = {128, 12}, /* Offset */
98 .size = {0, 4}, /* size */
99 },
100 {
101 .type = (OCM_DE_WIN_DRVR), /* Entry type */
102 .offs = {128, 16}, /* Offset */
103 .size = {128, 235, 1}, /* size */
104 },
105};
106
107struct asd_bios_chim_struct {
108 char sig[4];
109 u8 major; /* 1 */
110 u8 minor; /* 0 */
111 u8 bios_major;
112 u8 bios_minor;
113 __le32 bios_build;
114 u8 flags;
115 u8 pci_slot;
116 __le16 ue_num;
117 __le16 ue_size;
118 u8 _r[14];
119 /* The unit element array is right here.
120 */
121} __attribute__ ((packed));
122
123/**
124 * asd_read_ocm_seg - read an on chip memory (OCM) segment
125 * @asd_ha: pointer to the host adapter structure
126 * @buffer: where to write the read data
127 * @offs: offset into OCM where to read from
128 * @size: how many bytes to read
129 *
130 * Return the number of bytes not read. Return 0 on success.
131 */
132static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
133 u32 offs, int size)
134{
135 u8 *p = buffer;
136 if (unlikely(asd_ha->iospace))
137 asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
138 else {
139 for ( ; size > 0; size--, offs++, p++)
140 *p = asd_read_ocm_byte(asd_ha, offs);
141 }
142 return size;
143}
144
145static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha,
146 struct asd_ocm_dir *dir, u32 offs)
147{
148 int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir));
149 if (err) {
150 ASD_DPRINTK("couldn't read ocm segment\n");
151 return err;
152 }
153
154 if (dir->sig[0] != 'M' || dir->sig[1] != 'O') {
155 ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n",
156 dir->sig[0], dir->sig[1]);
157 return -ENOENT;
158 }
159 if (dir->major != 0) {
160 asd_printk("unsupported major version of ocm dir:0x%x\n",
161 dir->major);
162 return -ENOENT;
163 }
164 dir->num_de &= 0xf;
165 return 0;
166}
167
168/**
169 * asd_write_ocm_seg - write an on chip memory (OCM) segment
170 * @asd_ha: pointer to the host adapter structure
171 * @buffer: buffer containing the data to write
172 * @offs: offset into OCM to write to
173 * @size: how many bytes to write
174 *
175 * This function returns nothing; all @size bytes are written.
176 */
177static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer,
178 u32 offs, int size)
179{
180 u8 *p = buffer;
181 if (unlikely(asd_ha->iospace))
182 asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size);
183 else {
184 for ( ; size > 0; size--, offs++, p++)
185 asd_write_ocm_byte(asd_ha, offs, *p);
186 }
187 return;
188}
189
190#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16))
191
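/*
 * Standalone illustration (not driver code): the OCM directory stores
 * 24-bit offsets and sizes as three little-endian bytes, which
 * THREE_TO_NUM() above assembles into a plain integer.  The input
 * values below are the WIN_DRVR defaults from OCMDirEntriesInit.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t three_to_num(const uint8_t b[3])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
}

int main(void)
{
	const uint8_t offs[3] = {128, 16, 0};	/* .offs = {128, 16} */
	const uint8_t size[3] = {128, 235, 1};	/* .size = {128, 235, 1} */

	printf("offs = 0x%x (%u)\n",
	       (unsigned)three_to_num(offs), (unsigned)three_to_num(offs));
	/* -> 0x1080 (4224) */
	printf("size = 0x%x (%u)\n",
	       (unsigned)three_to_num(size), (unsigned)three_to_num(size));
	/* -> 0x1eb80 (125824) */
	return 0;
}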
192static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type,
193 u32 *offs, u32 *size)
194{
195 int i;
196 struct asd_ocm_dir_ent *ent;
197
198 for (i = 0; i < dir->num_de; i++) {
199 if (dir->entry[i].type == type)
200 break;
201 }
202 if (i >= dir->num_de)
203 return -ENOENT;
204 ent = &dir->entry[i];
205 *offs = (u32) THREE_TO_NUM(ent->offs);
206 *size = (u32) THREE_TO_NUM(ent->size);
207 return 0;
208}
209
210#define OCM_BIOS_CHIM_DE 2
211#define BC_BIOS_PRESENT 1
212
213static int asd_get_bios_chim(struct asd_ha_struct *asd_ha,
214 struct asd_ocm_dir *dir)
215{
216 int err;
217 struct asd_bios_chim_struct *bc_struct;
218 u32 offs, size;
219
220 err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size);
221 if (err) {
222 ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n");
223 goto out;
224 }
225 err = -ENOMEM;
226 bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL);
227 if (!bc_struct) {
228 asd_printk("no memory for bios_chim struct\n");
229 goto out;
230 }
231 err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs,
232 sizeof(*bc_struct));
233 if (err) {
234 ASD_DPRINTK("couldn't read ocm segment\n");
235 goto out2;
236 }
237 if (strncmp(bc_struct->sig, "SOIB", 4)
238 && strncmp(bc_struct->sig, "IPSA", 4)) {
239 ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n",
240 bc_struct->sig[0], bc_struct->sig[1],
241 bc_struct->sig[2], bc_struct->sig[3]);
242 err = -ENOENT;
243 goto out2;
244 }
245 if (bc_struct->major != 1) {
246 asd_printk("BIOS_CHIM unsupported major version:0x%x\n",
247 bc_struct->major);
248 err = -ENOENT;
249 goto out2;
250 }
251 if (bc_struct->flags & BC_BIOS_PRESENT) {
252 asd_ha->hw_prof.bios.present = 1;
253 asd_ha->hw_prof.bios.maj = bc_struct->bios_major;
254 asd_ha->hw_prof.bios.min = bc_struct->bios_minor;
255 asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build);
256 ASD_DPRINTK("BIOS present (%d,%d), %d\n",
257 asd_ha->hw_prof.bios.maj,
258 asd_ha->hw_prof.bios.min,
259 asd_ha->hw_prof.bios.bld);
260 }
261 asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num);
 262	asd_ha->hw_prof.ue.size = le16_to_cpu(bc_struct->ue_size);
263 ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num,
264 asd_ha->hw_prof.ue.size);
265 size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size;
266 if (size > 0) {
267 err = -ENOMEM;
268 asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL);
269 if (!asd_ha->hw_prof.ue.area)
270 goto out2;
271 err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area,
272 offs + sizeof(*bc_struct), size);
273 if (err) {
274 kfree(asd_ha->hw_prof.ue.area);
275 asd_ha->hw_prof.ue.area = NULL;
276 asd_ha->hw_prof.ue.num = 0;
277 asd_ha->hw_prof.ue.size = 0;
278 ASD_DPRINTK("couldn't read ue entries(%d)\n", err);
279 }
280 }
281out2:
282 kfree(bc_struct);
283out:
284 return err;
285}
286
287static void
288asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha)
289{
290 int i;
291
292 /* Zero OCM */
293 for (i = 0; i < OCM_MAX_SIZE; i += 4)
294 asd_write_ocm_dword(asd_ha, i, 0);
295
296 /* Write Dir */
297 asd_write_ocm_seg(asd_ha, &OCMDirInit, 0,
298 sizeof(struct asd_ocm_dir));
299
300 /* Write Dir Entries */
301 for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++)
302 asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i],
303 sizeof(struct asd_ocm_dir) +
304 (i * sizeof(struct asd_ocm_dir_ent))
305 , sizeof(struct asd_ocm_dir_ent));
306
307}
308
309static int
310asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha)
311{
312 struct pci_dev *pcidev = asd_ha->pcidev;
313 u32 reg;
314 int err = 0;
315 u32 v;
316
317 /* check if OCM has been initialized by BIOS */
318 reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
319
320 if (!(reg & OCMINITIALIZED)) {
321 err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v);
322 if (err) {
323 asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n",
324 pci_name(pcidev));
325 goto out;
326 }
327
 328		printk(KERN_INFO "OCM is not initialized by BIOS, "
 329		       "reinitialize it and ignore it, current IntrptStatus "
 330		       "is 0x%x\n", v);
331
332 if (v)
333 err = pci_write_config_dword(pcidev,
334 PCIC_INTRPT_STAT, v);
335 if (err) {
336 asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n",
337 pci_name(pcidev));
338 goto out;
339 }
340
341 asd_hwi_initialize_ocm_dir(asd_ha);
342
343 }
344out:
345 return err;
346}
347
348/**
349 * asd_read_ocm - read on chip memory (OCM)
350 * @asd_ha: pointer to the host adapter structure
351 */
352int asd_read_ocm(struct asd_ha_struct *asd_ha)
353{
354 int err;
355 struct asd_ocm_dir *dir;
356
357 if (asd_hwi_check_ocm_access(asd_ha))
358 return -1;
359
360 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
361 if (!dir) {
362 asd_printk("no memory for ocm dir\n");
363 return -ENOMEM;
364 }
365
366 err = asd_read_ocm_dir(asd_ha, dir, 0);
367 if (err)
368 goto out;
369
370 err = asd_get_bios_chim(asd_ha, dir);
371out:
372 kfree(dir);
373 return err;
374}
375
376/* ---------- FLASH stuff ---------- */
377
378#define FLASH_RESET 0xF0
379
380#define FLASH_SIZE 0x200000
381#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** "
382#define FLASH_NEXT_ENTRY_OFFS 0x2000
383#define FLASH_MAX_DIR_ENTRIES 32
384
385#define FLASH_DE_TYPE_MASK 0x3FFFFFFF
386#define FLASH_DE_MS 0x120
387#define FLASH_DE_CTRL_A_USER 0xE0
388
389struct asd_flash_de {
390 __le32 type;
391 __le32 offs;
392 __le32 pad_size;
393 __le32 image_size;
394 __le32 chksum;
395 u8 _r[12];
396 u8 version[32];
397} __attribute__ ((packed));
398
399struct asd_flash_dir {
400 u8 cookie[32];
401 __le32 rev; /* 2 */
402 __le32 chksum;
403 __le32 chksum_antidote;
404 __le32 bld;
405 u8 bld_id[32]; /* build id data */
406 u8 ver_data[32]; /* date and time of build */
407 __le32 ae_mask;
408 __le32 v_mask;
409 __le32 oc_mask;
410 u8 _r[20];
411 struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES];
412} __attribute__ ((packed));
413
414struct asd_manuf_sec {
415 char sig[2]; /* 'S', 'M' */
416 u16 offs_next;
417 u8 maj; /* 0 */
418 u8 min; /* 0 */
419 u16 chksum;
420 u16 size;
421 u8 _r[6];
422 u8 sas_addr[SAS_ADDR_SIZE];
423 u8 pcba_sn[ASD_PCBA_SN_SIZE];
424 /* Here start the other segments */
425 u8 linked_list[0];
426} __attribute__ ((packed));
427
428struct asd_manuf_phy_desc {
429 u8 state; /* low 4 bits */
430#define MS_PHY_STATE_ENABLEABLE 0
431#define MS_PHY_STATE_REPORTED 1
432#define MS_PHY_STATE_HIDDEN 2
433 u8 phy_id;
434 u16 _r;
435 u8 phy_control_0; /* mode 5 reg 0x160 */
436 u8 phy_control_1; /* mode 5 reg 0x161 */
437 u8 phy_control_2; /* mode 5 reg 0x162 */
438 u8 phy_control_3; /* mode 5 reg 0x163 */
439} __attribute__ ((packed));
440
441struct asd_manuf_phy_param {
442 char sig[2]; /* 'P', 'M' */
443 u16 next;
444 u8 maj; /* 0 */
445 u8 min; /* 2 */
446 u8 num_phy_desc; /* 8 */
447 u8 phy_desc_size; /* 8 */
448 u8 _r[3];
449 u8 usage_model_id;
450 u32 _r2;
451 struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS];
452} __attribute__ ((packed));
453
454#if 0
455static const char *asd_sb_type[] = {
456 "unknown",
457 "SGPIO",
458 [2 ... 0x7F] = "unknown",
459 [0x80] = "ADPT_I2C",
460 [0x81 ... 0xFF] = "VENDOR_UNIQUExx"
461};
462#endif
463
464struct asd_ms_sb_desc {
465 u8 type;
466 u8 node_desc_index;
467 u8 conn_desc_index;
468 u8 _recvd[0];
469} __attribute__ ((packed));
470
471#if 0
472static const char *asd_conn_type[] = {
473 [0 ... 7] = "unknown",
474 "SFF8470",
475 "SFF8482",
476 "SFF8484",
477 [0x80] = "PCIX_DAUGHTER0",
478 [0x81] = "SAS_DAUGHTER0",
479 [0x82 ... 0xFF] = "VENDOR_UNIQUExx"
480};
481
482static const char *asd_conn_location[] = {
483 "unknown",
484 "internal",
485 "external",
486 "board_to_board",
487};
488#endif
489
490struct asd_ms_conn_desc {
491 u8 type;
492 u8 location;
493 u8 num_sideband_desc;
494 u8 size_sideband_desc;
495 u32 _resvd;
496 u8 name[16];
497 struct asd_ms_sb_desc sb_desc[0];
498} __attribute__ ((packed));
499
500struct asd_nd_phy_desc {
501 u8 vp_attch_type;
502 u8 attch_specific[0];
503} __attribute__ ((packed));
504
505#if 0
506static const char *asd_node_type[] = {
507 "IOP",
508 "IO_CONTROLLER",
509 "EXPANDER",
510 "PORT_MULTIPLIER",
511 "PORT_MULTIPLEXER",
512 "MULTI_DROP_I2C_BUS",
513};
514#endif
515
516struct asd_ms_node_desc {
517 u8 type;
518 u8 num_phy_desc;
519 u8 size_phy_desc;
520 u8 _resvd;
521 u8 name[16];
522 struct asd_nd_phy_desc phy_desc[0];
523} __attribute__ ((packed));
524
525struct asd_ms_conn_map {
526 char sig[2]; /* 'M', 'C' */
527 __le16 next;
528 u8 maj; /* 0 */
529 u8 min; /* 0 */
530 __le16 cm_size; /* size of this struct */
531 u8 num_conn;
532 u8 conn_size;
533 u8 num_nodes;
534 u8 usage_model_id;
535 u32 _resvd;
536 struct asd_ms_conn_desc conn_desc[0];
537 struct asd_ms_node_desc node_desc[0];
538} __attribute__ ((packed));
539
540struct asd_ctrla_phy_entry {
541 u8 sas_addr[SAS_ADDR_SIZE];
542 u8 sas_link_rates; /* max in hi bits, min in low bits */
543 u8 flags;
544 u8 sata_link_rates;
545 u8 _r[5];
546} __attribute__ ((packed));
547
548struct asd_ctrla_phy_settings {
549 u8 id0; /* P'h'y */
550 u8 _r;
551 u16 next;
552 u8 num_phys; /* number of PHYs in the PCI function */
553 u8 _r2[3];
554 struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS];
555} __attribute__ ((packed));
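/*
 * Standalone sketch (not driver code): each CTRL-A phy entry packs the
 * maximum link rate into the high nibble and the minimum into the low
 * nibble, exactly as asd_process_ctrla_phy_settings() unpacks them
 * later in this file.  The 0x98 value is the default used when no
 * CTRL-A user section is found; 0x9/0x8 correspond to 3.0/1.5 Gbps in
 * the usual SAS link-rate encoding.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t sas_link_rates = 0x98;	/* driver default */
	uint8_t max_rate = (sas_link_rates & 0xF0) >> 4;
	uint8_t min_rate = sas_link_rates & 0x0F;

	printf("max rate code 0x%x, min rate code 0x%x\n", max_rate, min_rate);
	/* -> max 0x9 (3.0 Gbps), min 0x8 (1.5 Gbps) */
	return 0;
}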
556
557struct asd_ll_el {
558 u8 id0;
559 u8 id1;
560 __le16 next;
561 u8 something_here[0];
562} __attribute__ ((packed));
563
564static int asd_poll_flash(struct asd_ha_struct *asd_ha)
565{
566 int c;
567 u8 d;
568
569 for (c = 5000; c > 0; c--) {
570 d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
571 d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar);
572 if (!d)
573 return 0;
574 udelay(5);
575 }
576 return -ENOENT;
577}
578
579static int asd_reset_flash(struct asd_ha_struct *asd_ha)
580{
581 int err;
582
583 err = asd_poll_flash(asd_ha);
584 if (err)
585 return err;
586 asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET);
587 err = asd_poll_flash(asd_ha);
588
589 return err;
590}
591
592static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha,
593 void *buffer, u32 offs, int size)
594{
595 asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs,
596 size);
597 return 0;
598}
599
600/**
601 * asd_find_flash_dir - finds and reads the flash directory
602 * @asd_ha: pointer to the host adapter structure
603 * @flash_dir: pointer to flash directory structure
604 *
605 * If found, the flash directory segment will be copied to
606 * @flash_dir. Return 1 if found, 0 if not.
607 */
608static int asd_find_flash_dir(struct asd_ha_struct *asd_ha,
609 struct asd_flash_dir *flash_dir)
610{
611 u32 v;
612 for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
613 asd_read_flash_seg(asd_ha, flash_dir, v,
614 sizeof(FLASH_DIR_COOKIE)-1);
615 if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE,
616 sizeof(FLASH_DIR_COOKIE)-1) == 0) {
617 asd_ha->hw_prof.flash.dir_offs = v;
618 asd_read_flash_seg(asd_ha, flash_dir, v,
619 sizeof(*flash_dir));
620 return 1;
621 }
622 }
623 return 0;
624}
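/*
 * Standalone sketch (not driver code): asd_find_flash_dir() above
 * probes the 2 MB flash at 8 KB (FLASH_NEXT_ENTRY_OFFS) intervals
 * until the first 32 bytes at a candidate offset match the directory
 * cookie.  The same scan over an in-memory buffer:
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EX_FLASH_SIZE	0x200000u
#define EX_STEP		0x2000u
#define EX_COOKIE	"*** ADAPTEC FLASH DIRECTORY *** "

static long ex_find_dir(const uint8_t *flash)
{
	uint32_t offs;

	for (offs = 0; offs < EX_FLASH_SIZE; offs += EX_STEP)
		if (memcmp(flash + offs, EX_COOKIE, sizeof(EX_COOKIE) - 1) == 0)
			return (long)offs;
	return -1;	/* no directory found */
}

int main(void)
{
	static uint8_t flash[EX_FLASH_SIZE];

	memcpy(flash + 0x4000, EX_COOKIE, sizeof(EX_COOKIE) - 1);
	printf("directory found at offset 0x%lx\n", ex_find_dir(flash));
	return 0;
}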
625
626static int asd_flash_getid(struct asd_ha_struct *asd_ha)
627{
628 int err = 0;
629 u32 reg;
630
631 reg = asd_read_reg_dword(asd_ha, EXSICNFGR);
632
633 if (!(reg & FLASHEX)) {
634 ASD_DPRINTK("flash doesn't exist\n");
635 return -ENOENT;
636 }
637 if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR,
638 &asd_ha->hw_prof.flash.bar)) {
639 asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n",
640 pci_name(asd_ha->pcidev));
641 return -ENOENT;
642 }
643 asd_ha->hw_prof.flash.present = 1;
644 asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0;
645 err = asd_reset_flash(asd_ha);
646 if (err) {
647 ASD_DPRINTK("couldn't reset flash(%d)\n", err);
648 return err;
649 }
650 return 0;
651}
652
653static u16 asd_calc_flash_chksum(u16 *p, int size)
654{
655 u16 chksum = 0;
656
657 while (size-- > 0)
658 chksum += *p++;
659
660 return chksum;
661}
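/*
 * Standalone sketch (not driver code): asd_validate_ms() below sums
 * the manufacturing sector as 16-bit words and expects the result to
 * be 0, i.e. the stored checksum word is chosen so that it cancels the
 * sum of the remaining words modulo 2^16.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t ex_chksum(const uint16_t *p, int words)
{
	uint16_t sum = 0;

	while (words-- > 0)
		sum += *p++;
	return sum;
}

int main(void)
{
	uint16_t sector[8] = { 0x4D53, 0x0040, 0x0000, 0x0010,
			       0x1234, 0xABCD, 0x0001, 0 };
	uint16_t partial = 0;
	int i;

	for (i = 0; i < 7; i++)
		partial += sector[i];
	sector[7] = (uint16_t)(0x10000 - partial);	/* compensating word */

	printf("checksum over full sector: 0x%x\n", ex_chksum(sector, 8));
	/* -> 0x0, so validation passes */
	return 0;
}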
662
663
664static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type,
665 u32 *offs, u32 *size)
666{
667 int i;
668 struct asd_flash_de *de;
669
670 for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) {
671 u32 type = le32_to_cpu(flash_dir->dir_entry[i].type);
672
673 type &= FLASH_DE_TYPE_MASK;
674 if (type == entry_type)
675 break;
676 }
677 if (i >= FLASH_MAX_DIR_ENTRIES)
678 return -ENOENT;
679 de = &flash_dir->dir_entry[i];
680 *offs = le32_to_cpu(de->offs);
681 *size = le32_to_cpu(de->pad_size);
682 return 0;
683}
684
685static int asd_validate_ms(struct asd_manuf_sec *ms)
686{
687 if (ms->sig[0] != 'S' || ms->sig[1] != 'M') {
688 ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n",
689 ms->sig[0], ms->sig[1]);
690 return -ENOENT;
691 }
692 if (ms->maj != 0) {
693 asd_printk("unsupported manuf. sector. major version:%x\n",
694 ms->maj);
695 return -ENOENT;
696 }
697 ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next);
698 ms->chksum = le16_to_cpu((__force __le16) ms->chksum);
699 ms->size = le16_to_cpu((__force __le16) ms->size);
700
701 if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) {
702 asd_printk("failed manuf sector checksum\n");
703 }
704
705 return 0;
706}
707
708static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha,
709 struct asd_manuf_sec *ms)
710{
711 memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE);
712 return 0;
713}
714
715static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha,
716 struct asd_manuf_sec *ms)
717{
718 memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE);
719 asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0';
720 return 0;
721}
722
723/**
724 * asd_find_ll_by_id - find a linked list entry by its id
725 * @start: void pointer to the first element in the linked list
726 * @id0: the first byte of the id (offs 0)
727 * @id1: the second byte of the id (offs 1)
728 *
729 * @start has to be the _base_ element start, since the
730 * linked list entries' offsets are relative to this pointer.
731 * Some linked list entries use only the first id, in which case
732 * you can pass 0xFF for the second.
733 */
734static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1)
735{
736 struct asd_ll_el *el = start;
737
738 do {
739 switch (id1) {
740 default:
741 if (el->id1 == id1)
742 case 0xFF:
743 if (el->id0 == id0)
744 return el;
745 }
746 el = start + le16_to_cpu(el->next);
747 } while (el != start);
748
749 return NULL;
750}
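/*
 * Standalone sketch (not driver code): the sub-segments inside the
 * manufacturing sector form a linked list whose 'next' fields are byte
 * offsets from the sector base rather than pointers, and the walk
 * stops when an entry points back to the base.  Passing 0xFF as the
 * second id byte matches on the first byte only, as in
 * asd_find_ll_by_id() above.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct ex_el {
	uint8_t  id0;
	uint8_t  id1;
	uint16_t next;		/* offset from sector base */
};

static void *ex_find_by_id(void *base, uint8_t id0, uint8_t id1)
{
	struct ex_el *el = base;

	do {
		if (el->id0 == id0 && (id1 == 0xFF || el->id1 == id1))
			return el;
		el = (struct ex_el *)((uint8_t *)base + el->next);
	} while ((void *)el != base);

	return NULL;
}

int main(void)
{
	uint8_t sector[64] = {0};
	struct ex_el head = { 'S', 'M', 16 };	/* base segment */
	struct ex_el phy  = { 'P', 'M', 0 };	/* points back to the base */

	memcpy(sector +  0, &head, sizeof(head));
	memcpy(sector + 16, &phy,  sizeof(phy));

	printf("found 'PM' at offset %ld\n",
	       (long)((uint8_t *)ex_find_by_id(sector, 'P', 'M') - sector));
	/* -> 16 */
	return 0;
}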
751
752/**
753 * asd_ms_get_phy_params - get phy parameters from the manufacturing sector
754 * @asd_ha: pointer to the host adapter structure
755 * @manuf_sec: pointer to the manufacturing sector
756 *
757 * The manufacturing sector also contains the linked list of sub-segments,
758 * since when it was read, its size was taken from the flash directory,
759 * not from the structure size.
760 *
761 * HIDDEN phys do not count towards the total. REPORTED phys cannot
762 * be enabled but are reported and counted towards the total.
763 * ENABLEABLE phys are enabled by default and count towards the total.
764 * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys
765 * merely specifies the number of phys the host adapter decided to
766 * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN,
767 * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLEABLE.
768 * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2
769 * are actually enabled (enabled by default, max number of phys
770 * enableable in this case).
771 */
772static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha,
773 struct asd_manuf_sec *manuf_sec)
774{
775 int i;
776 int en_phys = 0;
777 int rep_phys = 0;
778 struct asd_manuf_phy_param *phy_param;
779 struct asd_manuf_phy_param dflt_phy_param;
780
781 phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M');
782 if (!phy_param) {
783 ASD_DPRINTK("ms: no phy parameters found\n");
784 ASD_DPRINTK("ms: Creating default phy parameters\n");
785 dflt_phy_param.sig[0] = 'P';
786 dflt_phy_param.sig[1] = 'M';
787 dflt_phy_param.maj = 0;
788 dflt_phy_param.min = 2;
789 dflt_phy_param.num_phy_desc = 8;
790 dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc);
 791		for (i = 0; i < ASD_MAX_PHYS; i++) {
792 dflt_phy_param.phy_desc[i].state = 0;
793 dflt_phy_param.phy_desc[i].phy_id = i;
794 dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6;
795 dflt_phy_param.phy_desc[i].phy_control_1 = 0x10;
796 dflt_phy_param.phy_desc[i].phy_control_2 = 0x43;
797 dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb;
798 }
799
800 phy_param = &dflt_phy_param;
801
802 }
803
804 if (phy_param->maj != 0) {
805 asd_printk("unsupported manuf. phy param major version:0x%x\n",
806 phy_param->maj);
807 return -ENOENT;
808 }
809
810 ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc);
811 asd_ha->hw_prof.enabled_phys = 0;
812 for (i = 0; i < phy_param->num_phy_desc; i++) {
813 struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i];
814 switch (pd->state & 0xF) {
815 case MS_PHY_STATE_HIDDEN:
816 ASD_DPRINTK("ms: phy%d: HIDDEN\n", i);
817 continue;
818 case MS_PHY_STATE_REPORTED:
819 ASD_DPRINTK("ms: phy%d: REPORTED\n", i);
820 asd_ha->hw_prof.enabled_phys &= ~(1 << i);
821 rep_phys++;
822 continue;
823 case MS_PHY_STATE_ENABLEABLE:
 824			ASD_DPRINTK("ms: phy%d: ENABLEABLE\n", i);
825 asd_ha->hw_prof.enabled_phys |= (1 << i);
826 en_phys++;
827 break;
828 }
829 asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0;
830 asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1;
831 asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2;
832 asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3;
833 }
834 asd_ha->hw_prof.max_phys = rep_phys + en_phys;
835 asd_ha->hw_prof.num_phys = en_phys;
836 ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n",
837 asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys);
838 ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys);
839 return 0;
840}
841
842static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha,
843 struct asd_manuf_sec *manuf_sec)
844{
845 struct asd_ms_conn_map *cm;
846
847 cm = asd_find_ll_by_id(manuf_sec, 'M', 'C');
848 if (!cm) {
849 ASD_DPRINTK("ms: no connector map found\n");
850 return 0;
851 }
852
853 if (cm->maj != 0) {
854 ASD_DPRINTK("ms: unsupported: connector map major version 0x%x"
855 "\n", cm->maj);
856 return -ENOENT;
857 }
858
859 /* XXX */
860
861 return 0;
862}
863
864
865/**
866 * asd_process_ms - find and extract information from the manufacturing sector
867 * @asd_ha: pointer to the host adapter structure
868 * @flash_dir: pointer to the flash directory
869 */
870static int asd_process_ms(struct asd_ha_struct *asd_ha,
871 struct asd_flash_dir *flash_dir)
872{
873 int err;
874 struct asd_manuf_sec *manuf_sec;
875 u32 offs, size;
876
877 err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size);
878 if (err) {
879 ASD_DPRINTK("Couldn't find the manuf. sector\n");
880 goto out;
881 }
882
883 if (size == 0)
884 goto out;
885
886 err = -ENOMEM;
887 manuf_sec = kmalloc(size, GFP_KERNEL);
888 if (!manuf_sec) {
889 ASD_DPRINTK("no mem for manuf sector\n");
890 goto out;
891 }
892
893 err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size);
894 if (err) {
895 ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n",
896 offs, size);
897 goto out2;
898 }
899
900 err = asd_validate_ms(manuf_sec);
901 if (err) {
902 ASD_DPRINTK("couldn't validate manuf sector\n");
903 goto out2;
904 }
905
906 err = asd_ms_get_sas_addr(asd_ha, manuf_sec);
907 if (err) {
908 ASD_DPRINTK("couldn't read the SAS_ADDR\n");
909 goto out2;
910 }
911 ASD_DPRINTK("manuf sect SAS_ADDR %llx\n",
912 SAS_ADDR(asd_ha->hw_prof.sas_addr));
913
914 err = asd_ms_get_pcba_sn(asd_ha, manuf_sec);
915 if (err) {
916 ASD_DPRINTK("couldn't read the PCBA SN\n");
917 goto out2;
918 }
919 ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn);
920
921 err = asd_ms_get_phy_params(asd_ha, manuf_sec);
922 if (err) {
923 ASD_DPRINTK("ms: couldn't get phy parameters\n");
924 goto out2;
925 }
926
927 err = asd_ms_get_connector_map(asd_ha, manuf_sec);
928 if (err) {
929 ASD_DPRINTK("ms: couldn't get connector map\n");
930 goto out2;
931 }
932
933out2:
934 kfree(manuf_sec);
935out:
936 return err;
937}
938
939static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha,
940 struct asd_ctrla_phy_settings *ps)
941{
942 int i;
943 for (i = 0; i < ps->num_phys; i++) {
944 struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i];
945
946 if (!PHY_ENABLED(asd_ha, i))
947 continue;
948 if (*(u64 *)pe->sas_addr == 0) {
949 asd_ha->hw_prof.enabled_phys &= ~(1 << i);
950 continue;
951 }
952 /* This is the SAS address which should be sent in IDENTIFY. */
953 memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr,
954 SAS_ADDR_SIZE);
955 asd_ha->hw_prof.phy_desc[i].max_sas_lrate =
956 (pe->sas_link_rates & 0xF0) >> 4;
957 asd_ha->hw_prof.phy_desc[i].min_sas_lrate =
958 (pe->sas_link_rates & 0x0F);
959 asd_ha->hw_prof.phy_desc[i].max_sata_lrate =
960 (pe->sata_link_rates & 0xF0) >> 4;
961 asd_ha->hw_prof.phy_desc[i].min_sata_lrate =
962 (pe->sata_link_rates & 0x0F);
963 asd_ha->hw_prof.phy_desc[i].flags = pe->flags;
964 ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x,"
965 " sata rate:0x%x-0x%x, flags:0x%x\n",
966 i,
967 SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr),
968 asd_ha->hw_prof.phy_desc[i].max_sas_lrate,
969 asd_ha->hw_prof.phy_desc[i].min_sas_lrate,
970 asd_ha->hw_prof.phy_desc[i].max_sata_lrate,
971 asd_ha->hw_prof.phy_desc[i].min_sata_lrate,
972 asd_ha->hw_prof.phy_desc[i].flags);
973 }
974
975 return 0;
976}
977
978/**
979 * asd_process_ctrl_a_user - process CTRL-A user settings
980 * @asd_ha: pointer to the host adapter structure
981 * @flash_dir: pointer to the flash directory
982 */
983static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha,
984 struct asd_flash_dir *flash_dir)
985{
986 int err, i;
987 u32 offs, size;
988 struct asd_ll_el *el;
989 struct asd_ctrla_phy_settings *ps;
990 struct asd_ctrla_phy_settings dflt_ps;
991
992 err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size);
993 if (err) {
994 ASD_DPRINTK("couldn't find CTRL-A user settings section\n");
995 ASD_DPRINTK("Creating default CTRL-A user settings section\n");
996
997 dflt_ps.id0 = 'h';
998 dflt_ps.num_phys = 8;
 999		for (i = 0; i < ASD_MAX_PHYS; i++) {
1000 memcpy(dflt_ps.phy_ent[i].sas_addr,
1001 asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE);
1002 dflt_ps.phy_ent[i].sas_link_rates = 0x98;
1003 dflt_ps.phy_ent[i].flags = 0x0;
1004 dflt_ps.phy_ent[i].sata_link_rates = 0x0;
1005 }
1006
1007 size = sizeof(struct asd_ctrla_phy_settings);
1008 ps = &dflt_ps;
1009 }
1010
1011 if (size == 0)
1012 goto out;
1013
1014 err = -ENOMEM;
1015 el = kmalloc(size, GFP_KERNEL);
1016 if (!el) {
1017 ASD_DPRINTK("no mem for ctrla user settings section\n");
1018 goto out;
1019 }
1020
1021 err = asd_read_flash_seg(asd_ha, (void *)el, offs, size);
1022 if (err) {
1023 ASD_DPRINTK("couldn't read ctrla phy settings section\n");
1024 goto out2;
1025 }
1026
1027 err = -ENOENT;
1028 ps = asd_find_ll_by_id(el, 'h', 0xFF);
1029 if (!ps) {
1030 ASD_DPRINTK("couldn't find ctrla phy settings struct\n");
1031 goto out2;
1032 }
1033
1034 err = asd_process_ctrla_phy_settings(asd_ha, ps);
1035 if (err) {
1036 ASD_DPRINTK("couldn't process ctrla phy settings\n");
1037 goto out2;
1038 }
1039out2:
1040 kfree(el);
1041out:
1042 return err;
1043}
1044
1045/**
1046 * asd_read_flash - read flash memory
1047 * @asd_ha: pointer to the host adapter structure
1048 */
1049int asd_read_flash(struct asd_ha_struct *asd_ha)
1050{
1051 int err;
1052 struct asd_flash_dir *flash_dir;
1053
1054 err = asd_flash_getid(asd_ha);
1055 if (err)
1056 return err;
1057
1058 flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL);
1059 if (!flash_dir)
1060 return -ENOMEM;
1061
1062 err = -ENOENT;
1063 if (!asd_find_flash_dir(asd_ha, flash_dir)) {
1064 ASD_DPRINTK("couldn't find flash directory\n");
1065 goto out;
1066 }
1067
1068 if (le32_to_cpu(flash_dir->rev) != 2) {
1069 asd_printk("unsupported flash dir version:0x%x\n",
1070 le32_to_cpu(flash_dir->rev));
1071 goto out;
1072 }
1073
1074 err = asd_process_ms(asd_ha, flash_dir);
1075 if (err) {
1076 ASD_DPRINTK("couldn't process manuf sector settings\n");
1077 goto out;
1078 }
1079
1080 err = asd_process_ctrl_a_user(asd_ha, flash_dir);
1081 if (err) {
1082 ASD_DPRINTK("couldn't process CTRL-A user settings\n");
1083 goto out;
1084 }
1085
1086out:
1087 kfree(flash_dir);
1088 return err;
1089}
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
new file mode 100644
index 000000000000..d9b6da5fd06c
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -0,0 +1,1404 @@
1/*
2 * Aic94xx SAS/SATA driver sequencer interface.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * Parts of this code adapted from David Chaw's adp94xx_seq.c.
8 *
9 * This file is licensed under GPLv2.
10 *
11 * This file is part of the aic94xx driver.
12 *
13 * The aic94xx driver is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation; version 2 of the
16 * License.
17 *
18 * The aic94xx driver is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with the aic94xx driver; if not, write to the Free Software
25 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
26 *
27 */
28
29#include <linux/delay.h>
30#include <linux/pci.h>
31#include <linux/module.h>
32#include <linux/firmware.h>
33#include "aic94xx_reg.h"
34#include "aic94xx_hwi.h"
35
36#include "aic94xx_seq.h"
37#include "aic94xx_dump.h"
38
39/* It takes no more than 0.05 us for an instruction
40 * to complete, so waiting for 1 us should be more than
41 * enough.
42 */
43#define PAUSE_DELAY 1
44#define PAUSE_TRIES 1000
45
46static const struct firmware *sequencer_fw;
47static const char *sequencer_version;
48static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
49 cseq_idle_loop, lseq_idle_loop;
50static u8 *cseq_code, *lseq_code;
51static u32 cseq_code_size, lseq_code_size;
52
53static u16 first_scb_site_no = 0xFFFF;
54static u16 last_scb_site_no;
55
56/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */
57
58/**
59 * asd_pause_cseq - pause the central sequencer
60 * @asd_ha: pointer to host adapter structure
61 *
62 * Return 0 on success, negative on failure.
63 */
64int asd_pause_cseq(struct asd_ha_struct *asd_ha)
65{
66 int count = PAUSE_TRIES;
67 u32 arp2ctl;
68
69 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
70 if (arp2ctl & PAUSED)
71 return 0;
72
73 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
74 do {
75 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
76 if (arp2ctl & PAUSED)
77 return 0;
78 udelay(PAUSE_DELAY);
79 } while (--count > 0);
80
81 ASD_DPRINTK("couldn't pause CSEQ\n");
82 return -1;
83}
84
85/**
86 * asd_unpause_cseq - unpause the central sequencer.
87 * @asd_ha: pointer to host adapter structure.
88 *
89 * Return 0 on success, negative on error.
90 */
91int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
92{
93 u32 arp2ctl;
94 int count = PAUSE_TRIES;
95
96 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
97 if (!(arp2ctl & PAUSED))
98 return 0;
99
100 asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
101 do {
102 arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
103 if (!(arp2ctl & PAUSED))
104 return 0;
105 udelay(PAUSE_DELAY);
106 } while (--count > 0);
107
108 ASD_DPRINTK("couldn't unpause the CSEQ\n");
109 return -1;
110}
111
112/**
113 * asd_seq_pause_lseq - pause a link sequencer
114 * @asd_ha: pointer to a host adapter structure
115 * @lseq: link sequencer of interest
116 *
117 * Return 0 on success, negative on error.
118 */
119static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
120{
121 u32 arp2ctl;
122 int count = PAUSE_TRIES;
123
124 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
125 if (arp2ctl & PAUSED)
126 return 0;
127
128 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
129 do {
130 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
131 if (arp2ctl & PAUSED)
132 return 0;
133 udelay(PAUSE_DELAY);
134 } while (--count > 0);
135
136 ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
137 return -1;
138}
139
140/**
141 * asd_pause_lseq - pause the link sequencer(s)
142 * @asd_ha: pointer to host adapter structure
143 * @lseq_mask: mask of link sequencers of interest
144 *
145 * Return 0 on success, negative on failure.
146 */
147int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
148{
149 int lseq;
150 int err = 0;
151
152 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
153 err = asd_seq_pause_lseq(asd_ha, lseq);
154 if (err)
155 return err;
156 }
157
158 return err;
159}
160
161/**
162 * asd_seq_unpause_lseq - unpause a link sequencer
163 * @asd_ha: pointer to host adapter structure
164 * @lseq: link sequencer of interest
165 *
166 * Return 0 on success, negative on error.
167 */
168static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
169{
170 u32 arp2ctl;
171 int count = PAUSE_TRIES;
172
173 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
174 if (!(arp2ctl & PAUSED))
175 return 0;
176
177 asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
178 do {
179 arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
180 if (!(arp2ctl & PAUSED))
181 return 0;
182 udelay(PAUSE_DELAY);
183 } while (--count > 0);
184
185 ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
 186	return -1;
187}
188
189
190/**
191 * asd_unpause_lseq - unpause the link sequencer(s)
192 * @asd_ha: pointer to host adapter structure
193 * @lseq_mask: mask of link sequencers of interest
194 *
195 * Return 0 on success, negative on failure.
196 */
197int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
198{
199 int lseq;
200 int err = 0;
201
202 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
203 err = asd_seq_unpause_lseq(asd_ha, lseq);
204 if (err)
205 return err;
206 }
207
208 return err;
209}
210
211/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
212
213static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
214 u32 size)
215{
216 u32 addr = CSEQ_RAM_REG_BASE_ADR;
217 const u32 *prog = (u32 *) _prog;
218 u32 i;
219
220 for (i = 0; i < size; i += 4, prog++, addr += 4) {
221 u32 val = asd_read_reg_dword(asd_ha, addr);
222
223 if (le32_to_cpu(*prog) != val) {
224 asd_printk("%s: cseq verify failed at %u "
225 "read:0x%x, wanted:0x%x\n",
226 pci_name(asd_ha->pcidev),
227 i, val, le32_to_cpu(*prog));
228 return -1;
229 }
230 }
231 ASD_DPRINTK("verified %d bytes, passed\n", size);
232 return 0;
233}
234
235/**
236 * asd_verify_lseq - verify the microcode of a link sequencer
237 * @asd_ha: pointer to host adapter structure
238 * @_prog: pointer to the microcode
239 * @size: size of the microcode in bytes
240 * @lseq: link sequencer of interest
241 *
242 * The link sequencer code is accessed in 4 KB pages, which are selected
243 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
244 * The 10 KB LSEQm instruction code is mapped, page at a time, at
245 * LmSEQRAM address.
246 */
247static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
248 u32 size, int lseq)
249{
250#define LSEQ_CODEPAGE_SIZE 4096
251 int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
252 u32 page;
253 const u32 *prog = (u32 *) _prog;
254
255 for (page = 0; page < pages; page++) {
256 u32 i;
257
258 asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
259 page << LmRAMPAGE_LSHIFT);
260 for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
261 i += 4, prog++, size-=4) {
262
263 u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);
264
265 if (le32_to_cpu(*prog) != val) {
266 asd_printk("%s: LSEQ%d verify failed "
267 "page:%d, offs:%d\n",
268 pci_name(asd_ha->pcidev),
269 lseq, page, i);
270 return -1;
271 }
272 }
273 }
274 ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
275 (int)((u8 *)prog-_prog));
276 return 0;
277}
278
279/**
280 * asd_verify_seq -- verify CSEQ/LSEQ microcode
281 * @asd_ha: pointer to host adapter structure
282 * @prog: pointer to microcode
283 * @size: size of the microcode
284 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
285 *
286 * Return 0 if microcode is correct, negative on mismatch.
287 */
288static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
289 u32 size, u8 lseq_mask)
290{
291 if (lseq_mask == 0)
292 return asd_verify_cseq(asd_ha, prog, size);
293 else {
294 int lseq, err;
295
296 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
297 err = asd_verify_lseq(asd_ha, prog, size, lseq);
298 if (err)
299 return err;
300 }
301 }
302
303 return 0;
304}
305#define ASD_DMA_MODE_DOWNLOAD
306#ifdef ASD_DMA_MODE_DOWNLOAD
307/* This is the size of the CSEQ Mapped instruction page */
308#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
309static int asd_download_seq(struct asd_ha_struct *asd_ha,
310 const u8 * const prog, u32 size, u8 lseq_mask)
311{
312 u32 comstaten;
313 u32 reg;
314 int page;
315 const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
316 struct asd_dma_tok *token;
317 int err = 0;
318
319 if (size % 4) {
320 asd_printk("sequencer program not multiple of 4\n");
321 return -1;
322 }
323
324 asd_pause_cseq(asd_ha);
325 asd_pause_lseq(asd_ha, 0xFF);
326
327 /* save, disable and clear interrupts */
328 comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
329 asd_write_reg_dword(asd_ha, COMSTATEN, 0);
330 asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);
331
332 asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
333 asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);
334
335 token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
336 if (!token) {
337 asd_printk("out of memory for dma SEQ download\n");
338 err = -ENOMEM;
339 goto out;
340 }
341 ASD_DPRINTK("dma-ing %d bytes\n", size);
342
343 for (page = 0; page < pages; page++) {
344 int i;
345 u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
346 (u32)MAX_DMA_OVLY_COUNT);
347
348 memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
349 asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
350 asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
351 reg = !page ? RESETOVLYDMA : 0;
352 reg |= (STARTOVLYDMA | OVLYHALTERR);
353 reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
354 /* Start DMA. */
355 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
356
357 for (i = PAUSE_TRIES*100; i > 0; i--) {
358 u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
359 if (!(dmadone & OVLYDMAACT))
360 break;
361 udelay(PAUSE_DELAY);
362 }
363 }
364
365 reg = asd_read_reg_dword(asd_ha, COMSTAT);
366 if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
367 || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
368 asd_printk("%s: error DMA-ing sequencer code\n",
369 pci_name(asd_ha->pcidev));
370 err = -ENODEV;
371 }
372
373 asd_free_coherent(asd_ha, token);
374 out:
375 asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);
376
377 return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
378}
379#else /* ASD_DMA_MODE_DOWNLOAD */
380static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
381 u32 size, u8 lseq_mask)
382{
383 int i;
384 u32 reg = 0;
385 const u32 *prog = (u32 *) _prog;
386
387 if (size % 4) {
388 asd_printk("sequencer program not multiple of 4\n");
389 return -1;
390 }
391
392 asd_pause_cseq(asd_ha);
393 asd_pause_lseq(asd_ha, 0xFF);
394
395 reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
396 reg |= PIOCMODE;
397
398 asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
399 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
400
401 ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
402 lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");
403
404 for (i = 0; i < size; i += 4, prog++)
405 asd_write_reg_dword(asd_ha, SPIODATA, *prog);
406
407 reg = (reg & ~PIOCMODE) | OVLYHALTERR;
408 asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);
409
410 return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
411}
412#endif /* ASD_DMA_MODE_DOWNLOAD */
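/*
 * Standalone sketch (not driver code): the DMA-mode download above
 * copies the sequencer image in chunks of at most MAX_DMA_OVLY_COUNT
 * bytes, computing on each pass how much remains.  The same chunking
 * arithmetic in isolation:
 */
#include <stdio.h>

#define EX_CHUNK ((1u << 14) - 1)	/* 16383, as MAX_DMA_OVLY_COUNT */

int main(void)
{
	unsigned int size = 40000;	/* example image size */
	unsigned int pages = (size + EX_CHUNK - 1) / EX_CHUNK;
	unsigned int page;

	for (page = 0; page < pages; page++) {
		unsigned int done = page * EX_CHUNK;
		unsigned int left = size - done < EX_CHUNK ? size - done
							   : EX_CHUNK;
		printf("page %u: copy %u bytes at offset %u\n",
		       page, left, done);
	}
	return 0;
}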
413
414/**
415 * asd_seq_download_seqs - download the sequencer microcode
416 * @asd_ha: pointer to host adapter structure
417 *
418 * Download the central and link sequencer microcode.
419 */
420static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
421{
422 int err;
423
424 if (!asd_ha->hw_prof.enabled_phys) {
425 asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
426 return -ENODEV;
427 }
428
429 /* Download the CSEQ */
430 ASD_DPRINTK("downloading CSEQ...\n");
431 err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
432 if (err) {
433 asd_printk("CSEQ download failed:%d\n", err);
434 return err;
435 }
436
437 /* Download the Link Sequencers code. All of the Link Sequencers
438 * microcode can be downloaded at the same time.
439 */
440 ASD_DPRINTK("downloading LSEQs...\n");
441 err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
442 asd_ha->hw_prof.enabled_phys);
443 if (err) {
444 /* Try it one at a time */
445 u8 lseq;
446 u8 lseq_mask = asd_ha->hw_prof.enabled_phys;
447
448 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
449 err = asd_download_seq(asd_ha, lseq_code,
450 lseq_code_size, 1<<lseq);
451 if (err)
452 break;
453 }
454 }
455 if (err)
456 asd_printk("LSEQs download failed:%d\n", err);
457
458 return err;
459}
460
461/* ---------- Initializing the chip, chip memory, etc. ---------- */
462
463/**
464 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
465 * @asd_ha: pointer to host adapter structure
466 */
467static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
468{
469 /* CSEQ Mode Independent, page 4 setup. */
470 asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
471 asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
472 asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
473 asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
474 asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
475 asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
476 asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
477 asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
478 asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
479 asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
480 asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
481 asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
482 asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
483 asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
484 {
485 u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
486 u8 val = hweight8(con);
487 asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
488 }
489 asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);
490
491 /* CSEQ Mode independent, page 5 setup. */
492 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
493 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
494 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
495 asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
496 asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
497 asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
498 asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
499 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
500 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
501 asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);
502
503 /* CSEQ Mode independent, page 6 setup. */
504 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
505 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
506 asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
507 asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
508 asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
509 asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
510 asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
511 asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
512 asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
513 /* Calculate the free scb mask. */
514 {
515 u16 cmdctx = asd_get_cmdctx_size(asd_ha);
516 cmdctx = (~((cmdctx/128)-1)) >> 8;
517 asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
518 }
519 asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
520 first_scb_site_no);
521 asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
522 last_scb_site_no);
523 asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
524 asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);
525
526 /* CSEQ Mode independent, page 7 setup. */
527 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
528 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
529 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
530 asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
531 asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
532 asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
533 asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
534 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
535 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
536 asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
537 asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
538 asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
539}
540
541/**
542 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
543 * @asd_ha: pointer to host adapter structure
544 */
545static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
546{
547 int i;
548 int moffs;
549
550 moffs = CSEQ_PAGE_SIZE * 2;
551
552 /* CSEQ Mode dependent, modes 0-7, page 0 setup. */
553 for (i = 0; i < 8; i++) {
554 asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
555 asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
556 asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
557 asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
558 asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
559 }
560
561 /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */
562
563 /* CSEQ Mode dependent, mode 8, page 0 setup. */
564 asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
565 asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
566 asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
567 asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
568 asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
569 asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
570 asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
571 asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
572 asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
573 asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
574 asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
575 asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
576 asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
577 (u16)last_scb_site_no+1);
578 asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
579 (u16)asd_ha->hw_prof.max_ddbs);
580
581 /* CSEQ Mode dependent, mode 8, page 1 setup. */
582 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
583 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
584 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
585 asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);
586
587 /* CSEQ Mode dependent, mode 8, page 2 setup. */
588 /* Tell the sequencer the bus address of the first SCB. */
589 asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
590 asd_ha->seq.next_scb.dma_handle);
591 ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
592 (unsigned long long)asd_ha->seq.next_scb.dma_handle);
593
594 /* Tell the sequencer the first Done List entry address. */
595 asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
596 asd_ha->seq.actual_dl->dma_handle);
597
598 /* Initialize the Q_DONE_POINTER with the least significant
599 * 4 bytes of the first Done List address. */
600 asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
601 ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));
602
603 asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);
604
605 /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
606}
607
608/**
609 * asd_init_cseq_scratch -- setup and init CSEQ
610 * @asd_ha: pointer to host adapter structure
611 *
612 * Set up and initialize the central sequencer. Initialize the mode-
613 * independent and mode-dependent scratch pages to the default settings.
614 */
615static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
616{
617 asd_init_cseq_mip(asd_ha);
618 asd_init_cseq_mdp(asd_ha);
619}
620
621/**
622 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
623 * @asd_ha: pointer to host adapter structure
624 */
625static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
626{
627 int i;
628
629 /* LSEQ Mode independent page 0 setup. */
630 asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
631 asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
632 asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
633 asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
634 ASD_NOTIFY_ENABLE_SPINUP);
635 asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
636 asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
637 asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
638 asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
639 asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
640 asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
641 asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
642 asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
643 asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);
644
645 /* LSEQ Mode independent page 1 setup. */
646 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
647 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
648 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
649 asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
650 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
651 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
652 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
653 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
654 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
655 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
656 asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
657 asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
658 asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
659 asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);
660
661 /* LSEQ Mode Independent page 2 setup. */
662 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
663 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
664 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
665 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
666 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
667 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
668 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
669 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
670 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
671 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
672 asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
673 for (i = 0; i < 12; i += 4)
674 asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);
675
676 /* LSEQ Mode Independent page 3 setup. */
677
678 /* Device present timer timeout */
679 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
680 ASD_DEV_PRESENT_TIMEOUT);
681
682 /* SATA interlock timer disabled */
683 asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
684 ASD_SATA_INTERLOCK_TIMEOUT);
685
686 /* STP shutdown timer timeout constant, IGNORED by the sequencer,
687 * always 0. */
688 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
689 ASD_STP_SHUTDOWN_TIMEOUT);
690
691 asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
692 ASD_SRST_ASSERT_TIMEOUT);
693
694 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
695 ASD_RCV_FIS_TIMEOUT);
696
697 asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
698 ASD_ONE_MILLISEC_TIMEOUT);
699
700 /* COM_INIT timer */
701 asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
702 ASD_TEN_MILLISEC_TIMEOUT);
703
704 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
705 ASD_SMP_RCV_TIMEOUT);
706}
707
708/**
709 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
710 * @asd_ha: pointer to host adapter structure
711 */
712static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
713{
714 int i;
715 u32 moffs;
716 u16 ret_addr[] = {
717 0xFFFF, /* mode 0 */
718 0xFFFF, /* mode 1 */
719 mode2_task, /* mode 2 */
720 0,
721 0xFFFF, /* mode 4/5 */
722 0xFFFF, /* mode 4/5 */
723 };
724
725 /*
726 * Modes 0, 1, 2 and 4/5 have common fields on page 0 for the first
727 * 14 bytes.
728 */
729 for (i = 0; i < 3; i++) {
730 moffs = i * LSEQ_MODE_SCRATCH_SIZE;
731 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
732 ret_addr[i]);
733 asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
734 asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
735 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
736 asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
737 asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
738 asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
739 }
740 /*
741 * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
742 */
743 asd_write_reg_word(asd_ha,
744 LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
745 ret_addr[5]);
746 asd_write_reg_word(asd_ha,
747 LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
748 asd_write_reg_word(asd_ha,
749 LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
750 asd_write_reg_word(asd_ha,
751 LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
752 asd_write_reg_word(asd_ha,
753 LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
754 asd_write_reg_byte(asd_ha,
755 LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
756 asd_write_reg_word(asd_ha,
757 LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
758
759 /* LSEQ Mode dependent 0, page 0 setup. */
760 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
761 (u16)asd_ha->hw_prof.max_ddbs);
762 asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
763 asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
764 asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
765 (u16)last_scb_site_no+1);
766 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
767 (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
768 asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
769 (u16) (LmM0INTEN_MASK & 0xFFFF));
770 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
771 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
772 asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
773 asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
774 asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);
775
776 /* LSEQ mode dependent, mode 1, page 0 setup. */
777 asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
778 asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
779 asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
780 asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
781 asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
782 asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
783 asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
784 asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);
785
786 /* LSEQ Mode dependent mode 2, page 0 setup */
787 asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
788 asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
789 asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
790 asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
791 asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
792 asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);
793
794 /* LSEQ Mode dependent, mode 4/5, page 0 setup. */
795 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
796 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
797 asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
798 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
799 asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
800 asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
801 asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
802 asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
803 asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
804 /*
805 * Set the desired interval between transmissions of the NOTIFY
806 * (ENABLE SPINUP) primitive. Must be initialized to val - 1.
807 */
808 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
809 ASD_NOTIFY_TIMEOUT - 1);
810 /* No delay for the first NOTIFY to be sent to the attached target. */
811 asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
812 ASD_NOTIFY_DOWN_COUNT);
813
814 /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
815 for (i = 0; i < 2; i++) {
816 int j;
817 /* Start from Page 1 of Mode 0 and 1. */
818 moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
819 /* All the fields of page 1 can be initialized to 0. */
820 for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
821 asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
822 }
823
824 /* LSEQ Mode dependent, mode 2, page 1 setup. */
825 asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
826 asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
827 asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);
828
829 /* LSEQ Mode dependent, mode 4/5, page 1. */
830 for (i = 0; i < LSEQ_PAGE_SIZE; i+=4)
831 asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
832 asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
833 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
834 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
835 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
836 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
837 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
838 asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
839 asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);
840
841 /* LSEQ Mode dependent, mode 0, page 2 setup. */
842 asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
843 asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
844 asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
845 asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
846 asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);
847
848 /* LSEQ Mode Dependent 1, page 2 setup. */
849 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
850 asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
851 asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
852 asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
853 asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);
854
855 /* LSEQ Mode Dependent 2, page 2 setup. */
856 /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
857 * i.e. always 0. */
858 asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
859 asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
860 asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
861 asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
862 asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
863 asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);
864
865 /* LSEQ Mode Dependent 4/5, page 2 setup. */
866 asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
867 asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
868 asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
869 asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
870}
871
872/**
873 * asd_init_lseq_scratch -- setup and init link sequencers
874 * @asd_ha: pointer to host adapter struct
875 */
876static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
877{
878 u8 lseq;
879 u8 lseq_mask;
880
881 lseq_mask = asd_ha->hw_prof.enabled_phys;
882 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
883 asd_init_lseq_mip(asd_ha, lseq);
884 asd_init_lseq_mdp(asd_ha, lseq);
885 }
886}
887
888/**
889 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
890 * @asd_ha: pointer to host adapter structure
891 *
892 * This should be done before initializing common CSEQ and LSEQ
893 * scratch since those areas depend on some computed values here,
894 * last_scb_site_no, etc.
895 */
896static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
897{
898 u16 site_no;
899 u16 max_scbs = 0;
900
901 for (site_no = asd_ha->hw_prof.max_scbs-1;
902 site_no != (u16) -1;
903 site_no--) {
904 u16 i;
905
906 /* Initialize all fields in the SCB site to 0. */
907 for (i = 0; i < ASD_SCB_SIZE; i += 4)
908 asd_scbsite_write_dword(asd_ha, site_no, i, 0);
909
910 /* Workaround needed by SEQ to fix a SATA issue is to exclude
911 * certain SCB sites from the free list. */
912 if (!SCB_SITE_VALID(site_no))
913 continue;
914
915 if (last_scb_site_no == 0)
916 last_scb_site_no = site_no;
917
918 /* For every SCB site, we need to initialize the
919 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
920 * and SG Element Flag. */
921
922 /* Q_NEXT field of the last SCB is invalidated. */
923 asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
924
925 /* Initialize SCB Site Opcode field to invalid. */
926 asd_scbsite_write_byte(asd_ha, site_no,
927 offsetof(struct scb_header, opcode),
928 0xFF);
929
930 /* Initialize SCB Site Flags field to mean a response
931 * frame has been received, so that inadvertently
932 * received frames are dropped. */
933 asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);
934
935 first_scb_site_no = site_no;
936 max_scbs++;
937 }
938 asd_ha->hw_prof.max_scbs = max_scbs;
939 ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
940 ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
941 ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
942}
943
944/**
945 * asd_init_cseq_cio - initialize CSEQ CIO registers
946 * @asd_ha: pointer to host adapter structure
947 */
948static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
949{
950 int i;
951
952 asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
953 asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
954 asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
955 asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
956 asd_ha->seq.scbpro = 0;
957 asd_write_reg_dword(asd_ha, SCBPRO, 0);
958 asd_write_reg_dword(asd_ha, CSEQCON, 0);
959
960 /* Initialize CSEQ Mode 11 Interrupt Vectors.
961 * The addresses are 16 bit wide and in dword units.
962 * The values of their macros are in byte units.
963 * Thus we have to divide by 4. */
964 asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
965 asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
966 asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);
967
968 /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
969 asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);
970
971 /* Initialize CSEQ Scratch Page to 0x04. */
972 asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);
973
974 /* Initialize CSEQ Mode[0-8] Dependent registers. */
975 /* Initialize Scratch Page to 0. */
976 for (i = 0; i < 9; i++)
977 asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);
978
979 /* Reset the ARP2 Program Count. */
980 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
981
982 for (i = 0; i < 8; i++) {
983 /* Initialize Mode n Link m Interrupt Enable. */
984 asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
985 /* Initialize Mode n Request Mailbox. */
986 asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
987 }
988}
989
990/**
991 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
992 * @asd_ha: pointer to host adapter structure
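 * @lseq: the link sequencer of interest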
993 */
994static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
995{
996 u8 *sas_addr;
997 int i;
998
999 /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
1000 asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);
1001
1002 asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);
1003
1004 /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
1005 for (i = 0; i < 3; i++)
1006 asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);
1007
1008 /* Initialize Mode 5 SCRATCHPAGE to 0. */
1009 asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);
1010
1011 asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
1012 /* Initialize Mode 0,1,2 and 5 Interrupt Enable and
1013 * Interrupt registers. */
1014 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
1015 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
1016 /* Mode 1 */
1017 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
1018 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
1019 /* Mode 2 */
1020 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
1021 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
1022 /* Mode 5 */
1023 asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
1024 asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);
1025
1026 /* Enable HW Timer status. */
1027 asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);
1028
1029 /* Enable Primitive Status 0 and 1. */
1030 asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
1031 asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);
1032
1033 /* Enable Frame Error. */
1034 asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
1035 asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);
1036
1037 /* Initialize Mode 0 Transfer Level to 512. */
1038 asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
1039 /* Initialize Mode 1 Transfer Level to 256. */
1040 asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);
1041
1042 /* Initialize Program Count. */
1043 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1044
1045 /* Enable Blind SG Move. */
1046 asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
1047 asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
1048 ASD_SATA_INTERLOCK_TIMEOUT);
1049
1050 (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));
1051
1052 /* Clear Primitive Status 0 and 1. */
1053 asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
1054 asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);
1055
1056 /* Clear HW Timer status. */
1057 asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);
1058
1059 /* Clear DMA Errors for Mode 0 and 1. */
1060 asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
1061 asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);
1062
1063 /* Clear SG DMA Errors for Mode 0 and 1. */
1064 asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
1065 asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);
1066
1067 /* Clear Mode 0 Buffer Parity Error. */
1068 asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);
1069
1070 /* Clear Mode 0 Frame Error register. */
1071 asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);
1072
1073 /* Reset LSEQ external interrupt arbiter. */
1074 asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);
1075
1076 /* Set the phy's SAS address in the LmSEQ WWN registers. */
1077 sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
1078 for (i = 0; i < SAS_ADDR_SIZE; i++)
1079 asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);
1080
1081 /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
1082 asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);
1083
1084 /* Set the Bus Inactivity Time Limit Timer. */
1085 asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);
1086
1087 /* Enable SATA Port Multiplier. */
1088 asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);
1089
1090 /* Initialize Interrupt Vector[0-10] addresses in Mode 3.
1091 * See the comment on CSEQ_INT_* */
1092 asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
1093 asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
1094 asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
1095 asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
1096 asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
1097 asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
1098 asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
1099 asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
1100 asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
1101 asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
1102 asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
1103 /*
1104 * Program the Link LED control, applicable only for
1105 * Chip Rev. B or later.
1106 */
1107 asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
1108 (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));
1109
1110 /* Set the Align Rate for SAS and STP mode. */
1111 asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
1112 asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
1113}
1114
1115
1116/**
1117 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
1118 * @asd_ha: pointer to host adapter struct
1119 */
1120static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
1121{
1122 int i;
1123
1124 for (i = 0; i < 8; i++)
1125 asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
1126 for (i = 0; i < 8; i++)
1127 asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
1128 /* Reset the external interrupt arbiter. */
1129 asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
1130}
1131
1132/**
1133 * asd_init_ddb_0 -- initialize DDB 0
1134 * @asd_ha: pointer to host adapter structure
1135 *
1136 * Initialize DDB site 0 which is used internally by the sequencer.
1137 */
1138static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
1139{
1140 int i;
1141
1142 /* Zero out the DDB explicitly */
1143 for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4)
1144 asd_ddbsite_write_dword(asd_ha, 0, i, 0);
1145
1146 asd_ddbsite_write_word(asd_ha, 0,
1147 offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
1148 asd_ddbsite_write_word(asd_ha, 0,
1149 offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
1150 asd_ha->hw_prof.max_ddbs-1);
1151 asd_ddbsite_write_word(asd_ha, 0,
1152 offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
1153 asd_ddbsite_write_word(asd_ha, 0,
1154 offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
1155 asd_ddbsite_write_word(asd_ha, 0,
1156 offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
1157 asd_ddbsite_write_word(asd_ha, 0,
1158 offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
1159 asd_ddbsite_write_word(asd_ha, 0,
1160 offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
1161 asd_ddbsite_write_word(asd_ha, 0,
1162 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
1163 asd_ddbsite_write_word(asd_ha, 0,
1164 offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
1165 asd_ha->hw_prof.num_phys * 2);
1166 asd_ddbsite_write_byte(asd_ha, 0,
1167 offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
1168 asd_ddbsite_write_byte(asd_ha, 0,
1169 offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
1170 asd_ddbsite_write_byte(asd_ha, 0,
1171 offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
1172 /* DDB 0 is reserved */
1173 set_bit(0, asd_ha->hw_prof.ddb_bitmap);
1174}
1175
1176/**
1177 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
1178 * @asd_ha: pointer to host adapter structure
1179 */
1180static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
1181{
1182 int lseq;
1183 u8 lseq_mask;
1184
1185 /* Initialize SCB sites. Done first to compute some values which
1186 * the rest of the init code depends on. */
1187 asd_init_scb_sites(asd_ha);
1188
1189 /* Initialize CSEQ Scratch RAM registers. */
1190 asd_init_cseq_scratch(asd_ha);
1191
1192 /* Initialize LmSEQ Scratch RAM registers. */
1193 asd_init_lseq_scratch(asd_ha);
1194
1195 /* Initialize CSEQ CIO registers. */
1196 asd_init_cseq_cio(asd_ha);
1197
1198 asd_init_ddb_0(asd_ha);
1199
1200 /* Initialize LmSEQ CIO registers. */
1201 lseq_mask = asd_ha->hw_prof.enabled_phys;
1202 for_each_sequencer(lseq_mask, lseq_mask, lseq)
1203 asd_init_lseq_cio(asd_ha, lseq);
1204 asd_post_init_cseq(asd_ha);
1205}
1206
1207
1208/**
1209 * asd_seq_start_cseq -- start the central sequencer, CSEQ
1210 * @asd_ha: pointer to host adapter structure
1211 */
1212static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
1213{
1214 /* Reset the ARP2 instruction to location zero. */
1215 asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
1216
1217 /* Unpause the CSEQ */
1218 return asd_unpause_cseq(asd_ha);
1219}
1220
1221/**
1222 * asd_seq_start_lseq -- start a link sequencer
1223 * @asd_ha: pointer to host adapter structure
1224 * @lseq: the link sequencer of interest
1225 */
1226static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
1227{
1228 /* Reset the ARP2 instruction to location zero. */
1229 asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);
1230
1231 /* Unpause the LmSEQ */
1232 return asd_seq_unpause_lseq(asd_ha, lseq);
1233}
1234
1235static int asd_request_firmware(struct asd_ha_struct *asd_ha)
1236{
1237 int err, i;
1238 struct sequencer_file_header header, *hdr_ptr;
1239 u32 csum = 0;
1240 u16 *ptr_cseq_vecs, *ptr_lseq_vecs;
1241
1242 if (sequencer_fw)
1243 /* already loaded */
1244 return 0;
1245
1246 err = request_firmware(&sequencer_fw,
1247 SAS_RAZOR_SEQUENCER_FW_FILE,
1248 &asd_ha->pcidev->dev);
1249 if (err)
1250 return err;
1251
1252 hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data;
1253
1254 header.csum = le32_to_cpu(hdr_ptr->csum);
1255 header.major = le32_to_cpu(hdr_ptr->major);
1256 header.minor = le32_to_cpu(hdr_ptr->minor);
1257 sequencer_version = hdr_ptr->version;
1258 header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
1259 header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
1260 header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
1261 header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
1262 header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
1263 header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
1264 header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
1265 header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
1266 header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
1267 header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
1268 header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);
1269
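/* The checksum covers every byte of the image after the 4-byte csum
 * field itself (see struct sequencer_file_header). */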
1270 for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
1271 csum += sequencer_fw->data[i];
1272
1273 if (csum != header.csum) {
1274 asd_printk("Firmware file checksum mismatch\n");
1275 return -EINVAL;
1276 }
1277
1278 if (header.cseq_table_size != CSEQ_NUM_VECS ||
1279 header.lseq_table_size != LSEQ_NUM_VECS) {
1280 asd_printk("Firmware file table size mismatch\n");
1281 return -EINVAL;
1282 }
1283
1284 ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
1285 ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
1286 mode2_task = header.mode2_task;
1287 cseq_idle_loop = header.cseq_idle_loop;
1288 lseq_idle_loop = header.lseq_idle_loop;
1289
1290 for (i = 0; i < CSEQ_NUM_VECS; i++)
1291 cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);
1292
1293 for (i = 0; i < LSEQ_NUM_VECS; i++)
1294 lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);
1295
1296 cseq_code = &sequencer_fw->data[header.cseq_code_offset];
1297 cseq_code_size = header.cseq_code_size;
1298 lseq_code = &sequencer_fw->data[header.lseq_code_offset];
1299 lseq_code_size = header.lseq_code_size;
1300
1301 return 0;
1302}
1303
1304int asd_init_seqs(struct asd_ha_struct *asd_ha)
1305{
1306 int err;
1307
1308 err = asd_request_firmware(asd_ha);
1309
1310 if (err) {
1311 asd_printk("Failed to load sequencer firmware file %s, error %d\n",
1312 SAS_RAZOR_SEQUENCER_FW_FILE, err);
1313 return err;
1314 }
1315
1316 asd_printk("using sequencer %s\n", sequencer_version);
1317 err = asd_seq_download_seqs(asd_ha);
1318 if (err) {
1319 asd_printk("couldn't download sequencers for %s\n",
1320 pci_name(asd_ha->pcidev));
1321 return err;
1322 }
1323
1324 asd_seq_setup_seqs(asd_ha);
1325
1326 return 0;
1327}
1328
1329int asd_start_seqs(struct asd_ha_struct *asd_ha)
1330{
1331 int err;
1332 u8 lseq_mask;
1333 int lseq;
1334
1335 err = asd_seq_start_cseq(asd_ha);
1336 if (err) {
1337 asd_printk("couldn't start CSEQ for %s\n",
1338 pci_name(asd_ha->pcidev));
1339 return err;
1340 }
1341
1342 lseq_mask = asd_ha->hw_prof.enabled_phys;
1343 for_each_sequencer(lseq_mask, lseq_mask, lseq) {
1344 err = asd_seq_start_lseq(asd_ha, lseq);
1345 if (err) {
1346 asd_printk("coudln't start LSEQ %d for %s\n", lseq,
1347 pci_name(asd_ha->pcidev));
1348 return err;
1349 }
1350 }
1351
1352 return 0;
1353}
1354
1355/**
1356 * asd_update_port_links -- update port_map_by_links and phy_is_up
1357 * @sas_phy: pointer to the phy which has been added to a port
1358 *
1359 * 1) When a link reset has completed and we got BYTES DMAED with a
1360 * valid frame we call this function for that phy, to indicate that
1361 * the phy is up, i.e. we update the phy_is_up in DDB 0. The
1362 * sequencer checks phy_is_up when pending SCBs are to be sent, and
1363 * when an open address frame has been received.
1364 *
1365 * 2) When we know of ports, we call this function to update the map
1366 * of phys participating in that port, i.e. we update the
1367 * port_map_by_links in DDB 0. When a HARD_RESET primitive has been
1368 * received, the sequencer disables all phys in that port.
1369 * port_map_by_links is also used as the conn_mask byte in the
1370 * initiator/target port DDB.
1371 */
1372void asd_update_port_links(struct asd_sas_phy *sas_phy)
1373{
1374 struct asd_ha_struct *asd_ha = sas_phy->ha->lldd_ha;
1375 const u8 phy_mask = (u8) sas_phy->port->phy_mask;
1376 u8 phy_is_up;
1377 u8 mask;
1378 int i, err;
1379
1380 for_each_phy(phy_mask, mask, i)
1381 asd_ddbsite_write_byte(asd_ha, 0,
1382 offsetof(struct asd_ddb_seq_shared,
1383 port_map_by_links)+i,phy_mask);
1384
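/* Update phy_is_up with a bounded number of read/conditional-update
 * retries, since the byte can apparently be modified concurrently
 * (e.g. by the sequencer) between the read and the update. */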
1385 for (i = 0; i < 12; i++) {
1386 phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
1387 offsetof(struct asd_ddb_seq_shared, phy_is_up));
1388 err = asd_ddbsite_update_byte(asd_ha, 0,
1389 offsetof(struct asd_ddb_seq_shared, phy_is_up),
1390 phy_is_up,
1391 phy_is_up | phy_mask);
1392 if (!err)
1393 break;
1394 else if (err == -EFAULT) {
1395 asd_printk("phy_is_up: parity error in DDB 0\n");
1396 break;
1397 }
1398 }
1399
1400 if (err)
1401 asd_printk("couldn't update DDB 0:error:%d\n", err);
1402}
1403
1404MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h
new file mode 100644
index 000000000000..42281c36153b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_seq.h
@@ -0,0 +1,70 @@
1/*
2 * Aic94xx SAS/SATA driver sequencer interface header file.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#ifndef _AIC94XX_SEQ_H_
28#define _AIC94XX_SEQ_H_
29
30#define CSEQ_NUM_VECS 3
31#define LSEQ_NUM_VECS 11
32
33#define SAS_RAZOR_SEQUENCER_FW_FILE "aic94xx-seq.fw"
34
35/* Note: All quantities in the sequencer file are little endian */
36struct sequencer_file_header {
37 /* Checksum of the entire contents of the sequencer excluding
38 * these four bytes */
39 u32 csum;
40 /* numeric major version */
41 u32 major;
42 /* numeric minor version */
43 u32 minor;
44 /* version string printed by driver */
45 char version[16];
46 u32 cseq_table_offset;
47 u32 cseq_table_size;
48 u32 lseq_table_offset;
49 u32 lseq_table_size;
50 u32 cseq_code_offset;
51 u32 cseq_code_size;
52 u32 lseq_code_offset;
53 u32 lseq_code_size;
54 u16 mode2_task;
55 u16 cseq_idle_loop;
56 u16 lseq_idle_loop;
57} __attribute__((packed));
58
59#ifdef __KERNEL__
60int asd_pause_cseq(struct asd_ha_struct *asd_ha);
61int asd_unpause_cseq(struct asd_ha_struct *asd_ha);
62int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
63int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask);
64int asd_init_seqs(struct asd_ha_struct *asd_ha);
65int asd_start_seqs(struct asd_ha_struct *asd_ha);
66
67void asd_update_port_links(struct asd_sas_phy *phy);
68#endif
69
70#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
new file mode 100644
index 000000000000..285e70dae933
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -0,0 +1,642 @@
1/*
2 * Aic94xx SAS/SATA Tasks
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/spinlock.h>
28#include "aic94xx.h"
29#include "aic94xx_sas.h"
30#include "aic94xx_hwi.h"
31
32static void asd_unbuild_ata_ascb(struct asd_ascb *a);
33static void asd_unbuild_smp_ascb(struct asd_ascb *a);
34static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
35
36static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
37{
38 unsigned long flags;
39
40 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
41 asd_ha->seq.can_queue += num;
42 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
43}
44
45/* PCI_DMA_... to our direction translation.
46 */
47static const u8 data_dir_flags[] = {
48 [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
49 [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
50 [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
51 [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
52};
53
54static inline int asd_map_scatterlist(struct sas_task *task,
55 struct sg_el *sg_arr,
56 unsigned long gfp_flags)
57{
58 struct asd_ascb *ascb = task->lldd_task;
59 struct asd_ha_struct *asd_ha = ascb->ha;
60 struct scatterlist *sc;
61 int num_sg, res;
62
63 if (task->data_dir == PCI_DMA_NONE)
64 return 0;
65
66 if (task->num_scatter == 0) {
67 void *p = task->scatter;
68 dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
69 task->total_xfer_len,
70 task->data_dir);
71 sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
72 sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
73 sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
74 return 0;
75 }
76
77 num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
78 task->data_dir);
79 if (num_sg == 0)
80 return -ENOMEM;
81
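/* With more than three elements only the first two SG entries live
 * inline in the SCB: the second is flagged end-of-segment with
 * next_sg_offs pointing at the third inline slot, which instead
 * carries the bus address of an external coherent list holding the
 * full set of elements (the last one flagged end-of-list). */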
82 if (num_sg > 3) {
83 int i;
84
85 ascb->sg_arr = asd_alloc_coherent(asd_ha,
86 num_sg*sizeof(struct sg_el),
87 gfp_flags);
88 if (!ascb->sg_arr) {
89 res = -ENOMEM;
90 goto err_unmap;
91 }
92 for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
93 struct sg_el *sg =
94 &((struct sg_el *)ascb->sg_arr->vaddr)[i];
95 sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
96 sg->size = cpu_to_le32((u32)sg_dma_len(sc));
97 if (i == num_sg-1)
98 sg->flags |= ASD_SG_EL_LIST_EOL;
99 }
100
101 for (sc = task->scatter, i = 0; i < 2; i++, sc++) {
102 sg_arr[i].bus_addr =
103 cpu_to_le64((u64)sg_dma_address(sc));
104 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
105 }
106 sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
107 sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;
108
109 memset(&sg_arr[2], 0, sizeof(*sg_arr));
110 sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
111 } else {
112 int i;
113 for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) {
114 sg_arr[i].bus_addr =
115 cpu_to_le64((u64)sg_dma_address(sc));
116 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
117 }
118 sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
119 }
120
121 return 0;
122err_unmap:
123 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
124 task->data_dir);
125 return res;
126}
127
128static inline void asd_unmap_scatterlist(struct asd_ascb *ascb)
129{
130 struct asd_ha_struct *asd_ha = ascb->ha;
131 struct sas_task *task = ascb->uldd_task;
132
133 if (task->data_dir == PCI_DMA_NONE)
134 return;
135
136 if (task->num_scatter == 0) {
137 dma_addr_t dma = (dma_addr_t)
138 le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
139 pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
140 task->data_dir);
141 return;
142 }
143
144 asd_free_coherent(asd_ha, ascb->sg_arr);
145 pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
146 task->data_dir);
147}
148
149/* ---------- Task complete tasklet ---------- */
150
151static void asd_get_response_tasklet(struct asd_ascb *ascb,
152 struct done_list_struct *dl)
153{
154 struct asd_ha_struct *asd_ha = ascb->ha;
155 struct sas_task *task = ascb->uldd_task;
156 struct task_status_struct *ts = &task->task_status;
157 unsigned long flags;
158 struct tc_resp_sb_struct {
159 __le16 index_escb;
160 u8 len_lsb;
161 u8 flags;
162 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
163
164/* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
165 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
166 struct asd_ascb *escb;
167 struct asd_dma_tok *edb;
168 void *r;
169
170 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
171 escb = asd_tc_index_find(&asd_ha->seq,
172 (int)le16_to_cpu(resp_sb->index_escb));
173 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
174
175 if (!escb) {
176 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
177 return;
178 }
179
180 ts->buf_valid_size = 0;
181 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
182 r = edb->vaddr;
183 if (task->task_proto == SAS_PROTO_SSP) {
184 struct ssp_response_iu *iu =
185 r + 16 + sizeof(struct ssp_frame_hdr);
186
187 ts->residual = le32_to_cpu(*(__le32 *)r);
188 ts->resp = SAS_TASK_COMPLETE;
189 if (iu->datapres == 0)
190 ts->stat = iu->status;
191 else if (iu->datapres == 1)
192 ts->stat = iu->resp_data[3];
193 else if (iu->datapres == 2) {
194 ts->stat = SAM_CHECK_COND;
195 ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE,
196 be32_to_cpu(iu->sense_data_len));
197 memcpy(ts->buf, iu->sense_data, ts->buf_valid_size);
198 if (iu->status != SAM_CHECK_COND) {
199 ASD_DPRINTK("device %llx sent sense data, but "
200 "stat(0x%x) is not CHECK_CONDITION"
201 "\n",
202 SAS_ADDR(task->dev->sas_addr),
203 ts->stat);
204 }
205 }
206 } else {
207 struct ata_task_resp *resp = (void *) &ts->buf[0];
208
209 ts->residual = le32_to_cpu(*(__le32 *)r);
210
211 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
212 resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
213 memcpy(&resp->ending_fis[0], r+16, 24);
214 ts->buf_valid_size = sizeof(*resp);
215 }
216 }
217
218 asd_invalidate_edb(escb, edb_id);
219}
220
221static void asd_task_tasklet_complete(struct asd_ascb *ascb,
222 struct done_list_struct *dl)
223{
224 struct sas_task *task = ascb->uldd_task;
225 struct task_status_struct *ts = &task->task_status;
226 unsigned long flags;
227 u8 opcode = dl->opcode;
228
229 asd_can_dequeue(ascb->ha, 1);
230
231Again:
232 switch (opcode) {
233 case TC_NO_ERROR:
234 ts->resp = SAS_TASK_COMPLETE;
235 ts->stat = SAM_GOOD;
236 break;
237 case TC_UNDERRUN:
238 ts->resp = SAS_TASK_COMPLETE;
239 ts->stat = SAS_DATA_UNDERRUN;
240 ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
241 break;
242 case TC_OVERRUN:
243 ts->resp = SAS_TASK_COMPLETE;
244 ts->stat = SAS_DATA_OVERRUN;
245 ts->residual = 0;
246 break;
247 case TC_SSP_RESP:
248 case TC_ATA_RESP:
249 ts->resp = SAS_TASK_COMPLETE;
250 ts->stat = SAS_PROTO_RESPONSE;
251 asd_get_response_tasklet(ascb, dl);
252 break;
253 case TF_OPEN_REJECT:
254 ts->resp = SAS_TASK_UNDELIVERED;
255 ts->stat = SAS_OPEN_REJECT;
256 if (dl->status_block[1] & 2)
257 ts->open_rej_reason = 1 + dl->status_block[2];
258 else if (dl->status_block[1] & 1)
259 ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
260 else
261 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
262 break;
263 case TF_OPEN_TO:
264 ts->resp = SAS_TASK_UNDELIVERED;
265 ts->stat = SAS_OPEN_TO;
266 break;
267 case TF_PHY_DOWN:
268 case TU_PHY_DOWN:
269 ts->resp = SAS_TASK_UNDELIVERED;
270 ts->stat = SAS_PHY_DOWN;
271 break;
272 case TI_PHY_DOWN:
273 ts->resp = SAS_TASK_COMPLETE;
274 ts->stat = SAS_PHY_DOWN;
275 break;
276 case TI_BREAK:
277 case TI_PROTO_ERR:
278 case TI_NAK:
279 case TI_ACK_NAK_TO:
280 case TF_SMP_XMIT_RCV_ERR:
281 case TC_ATA_R_ERR_RECV:
282 ts->resp = SAS_TASK_COMPLETE;
283 ts->stat = SAS_INTERRUPTED;
284 break;
285 case TF_BREAK:
286 case TU_BREAK:
287 case TU_ACK_NAK_TO:
288 case TF_SMPRSP_TO:
289 ts->resp = SAS_TASK_UNDELIVERED;
290 ts->stat = SAS_DEV_NO_RESPONSE;
291 break;
292 case TF_NAK_RECV:
293 ts->resp = SAS_TASK_COMPLETE;
294 ts->stat = SAS_NAK_R_ERR;
295 break;
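/* On I_T nexus loss the first byte of the status block carries the
 * underlying completion opcode; re-run the switch on that value. */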
296 case TA_I_T_NEXUS_LOSS:
297 opcode = dl->status_block[0];
298 goto Again;
299 break;
300 case TF_INV_CONN_HANDLE:
301 ts->resp = SAS_TASK_UNDELIVERED;
302 ts->stat = SAS_DEVICE_UNKNOWN;
303 break;
304 case TF_REQUESTED_N_PENDING:
305 ts->resp = SAS_TASK_UNDELIVERED;
306 ts->stat = SAS_PENDING;
307 break;
308 case TC_TASK_CLEARED:
309 case TA_ON_REQ:
310 ts->resp = SAS_TASK_COMPLETE;
311 ts->stat = SAS_ABORTED_TASK;
312 break;
313
314 case TF_NO_SMP_CONN:
315 case TF_TMF_NO_CTX:
316 case TF_TMF_NO_TAG:
317 case TF_TMF_TAG_FREE:
318 case TF_TMF_TASK_DONE:
319 case TF_TMF_NO_CONN_HANDLE:
320 case TF_IRTT_TO:
321 case TF_IU_SHORT:
322 case TF_DATA_OFFS_ERR:
323 ts->resp = SAS_TASK_UNDELIVERED;
324 ts->stat = SAS_DEV_NO_RESPONSE;
325 break;
326
327 case TC_LINK_ADM_RESP:
328 case TC_CONTROL_PHY:
329 case TC_RESUME:
330 case TC_PARTIAL_SG_LIST:
331 default:
332 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode);
333 break;
334 }
335
336 switch (task->task_proto) {
337 case SATA_PROTO:
338 case SAS_PROTO_STP:
339 asd_unbuild_ata_ascb(ascb);
340 break;
341 case SAS_PROTO_SMP:
342 asd_unbuild_smp_ascb(ascb);
343 break;
344 case SAS_PROTO_SSP:
345 asd_unbuild_ssp_ascb(ascb);
346 default:
347 break;
348 }
349
350 spin_lock_irqsave(&task->task_state_lock, flags);
351 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
352 task->task_state_flags |= SAS_TASK_STATE_DONE;
353 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
354 spin_unlock_irqrestore(&task->task_state_lock, flags);
355 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
356 "stat 0x%x but aborted by upper layer!\n",
357 task, opcode, ts->resp, ts->stat);
358 complete(&ascb->completion);
359 } else {
360 spin_unlock_irqrestore(&task->task_state_lock, flags);
361 task->lldd_task = NULL;
362 asd_ascb_free(ascb);
363 mb();
364 task->task_done(task);
365 }
366}
367
368/* ---------- ATA ---------- */
369
370static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
371 unsigned long gfp_flags)
372{
373 struct domain_device *dev = task->dev;
374 struct scb *scb;
375 u8 flags;
376 int res = 0;
377
378 scb = ascb->scb;
379
380 if (unlikely(task->ata_task.device_control_reg_update))
381 scb->header.opcode = CONTROL_ATA_DEV;
382 else if (dev->sata_dev.command_set == ATA_COMMAND_SET)
383 scb->header.opcode = INITIATE_ATA_TASK;
384 else
385 scb->header.opcode = INITIATE_ATAPI_TASK;
386
387 scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
388 if (dev->port->oob_mode == SAS_OOB_MODE)
389 scb->ata_task.proto_conn_rate |= dev->linkrate;
390
391 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
392 scb->ata_task.fis = task->ata_task.fis;
393 scb->ata_task.fis.fis_type = 0x27;
394 if (likely(!task->ata_task.device_control_reg_update))
395 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
396 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
397 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
398 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
399 16);
400 scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
401 scb->ata_task.conn_handle = cpu_to_le16(
402 (u16)(unsigned long)dev->lldd_dev);
403
404 if (likely(!task->ata_task.device_control_reg_update)) {
405 flags = 0;
406 if (task->ata_task.dma_xfer)
407 flags |= DATA_XFER_MODE_DMA;
408 if (task->ata_task.use_ncq &&
409 dev->sata_dev.command_set != ATAPI_COMMAND_SET)
410 flags |= ATA_Q_TYPE_NCQ;
411 flags |= data_dir_flags[task->data_dir];
412 scb->ata_task.ata_flags = flags;
413
414 scb->ata_task.retry_count = task->ata_task.retry_count;
415
416 flags = 0;
417 if (task->ata_task.set_affil_pol)
418 flags |= SET_AFFIL_POLICY;
419 if (task->ata_task.stp_affil_pol)
420 flags |= STP_AFFIL_POLICY;
421 scb->ata_task.flags = flags;
422 }
423 ascb->tasklet_complete = asd_task_tasklet_complete;
424
425 if (likely(!task->ata_task.device_control_reg_update))
426 res = asd_map_scatterlist(task, scb->ata_task.sg_element,
427 gfp_flags);
428
429 return res;
430}
431
432static void asd_unbuild_ata_ascb(struct asd_ascb *a)
433{
434 asd_unmap_scatterlist(a);
435}
436
437/* ---------- SMP ---------- */
438
439static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
440 unsigned long gfp_flags)
441{
442 struct asd_ha_struct *asd_ha = ascb->ha;
443 struct domain_device *dev = task->dev;
444 struct scb *scb;
445
446 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
447 PCI_DMA_FROMDEVICE);
448 pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
449 PCI_DMA_FROMDEVICE);
450
451 scb = ascb->scb;
452
453 scb->header.opcode = INITIATE_SMP_TASK;
454
455 scb->smp_task.proto_conn_rate = dev->linkrate;
456
457 scb->smp_task.smp_req.bus_addr =
458 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
459 scb->smp_task.smp_req.size =
460 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
461
462 scb->smp_task.smp_resp.bus_addr =
463 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
464 scb->smp_task.smp_resp.size =
465 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
466
467 scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
468 scb->smp_task.conn_handle = cpu_to_le16((u16)
469 (unsigned long)dev->lldd_dev);
470
471 ascb->tasklet_complete = asd_task_tasklet_complete;
472
473 return 0;
474}
475
476static void asd_unbuild_smp_ascb(struct asd_ascb *a)
477{
478 struct sas_task *task = a->uldd_task;
479
480 BUG_ON(!task);
481 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
482 PCI_DMA_FROMDEVICE);
483 pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
484 PCI_DMA_FROMDEVICE);
485}
486
487/* ---------- SSP ---------- */
488
489static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
490 unsigned long gfp_flags)
491{
492 struct domain_device *dev = task->dev;
493 struct scb *scb;
494 int res = 0;
495
496 scb = ascb->scb;
497
498 scb->header.opcode = INITIATE_SSP_TASK;
499
500 scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */
501 scb->ssp_task.proto_conn_rate |= dev->linkrate;
502 scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
503 scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
504 memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
505 HASHED_SAS_ADDR_SIZE);
506 memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
507 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
508 scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
509
510 memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
511 if (task->ssp_task.enable_first_burst)
512 scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
513 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
514 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
515 memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16);
516
517 scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
518 scb->ssp_task.conn_handle = cpu_to_le16(
519 (u16)(unsigned long)dev->lldd_dev);
520 scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
521 scb->ssp_task.retry_count = task->ssp_task.retry_count;
522
523 ascb->tasklet_complete = asd_task_tasklet_complete;
524
525 res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
526
527 return res;
528}
529
530static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
531{
532 asd_unmap_scatterlist(a);
533}
534
535/* ---------- Execute Task ---------- */
536
537static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
538{
539 int res = 0;
540 unsigned long flags;
541
542 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
543 if ((asd_ha->seq.can_queue - num) < 0)
544 res = -SAS_QUEUE_FULL;
545 else
546 asd_ha->seq.can_queue -= num;
547 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
548
549 return res;
550}
551
552int asd_execute_task(struct sas_task *task, const int num,
553 unsigned long gfp_flags)
554{
555 int res = 0;
556 LIST_HEAD(alist);
557 struct sas_task *t = task;
558 struct asd_ascb *ascb = NULL, *a;
559 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
560
561 res = asd_can_queue(asd_ha, num);
562 if (res)
563 return res;
564
565 res = num;
566 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
567 if (res) {
568 res = -ENOMEM;
569 goto out_err;
570 }
571
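/* Splice the freshly allocated aSCB chain onto a local list head so
 * each aSCB can be paired one-to-one with a sas_task from the
 * caller's task list. */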
572 __list_add(&alist, ascb->list.prev, &ascb->list);
573 list_for_each_entry(a, &alist, list) {
574 a->uldd_task = t;
575 t->lldd_task = a;
576 t = list_entry(t->list.next, struct sas_task, list);
577 }
578 list_for_each_entry(a, &alist, list) {
579 t = a->uldd_task;
580 a->uldd_timer = 1;
581 if (t->task_proto & SAS_PROTO_STP)
582 t->task_proto = SAS_PROTO_STP;
583 switch (t->task_proto) {
584 case SATA_PROTO:
585 case SAS_PROTO_STP:
586 res = asd_build_ata_ascb(a, t, gfp_flags);
587 break;
588 case SAS_PROTO_SMP:
589 res = asd_build_smp_ascb(a, t, gfp_flags);
590 break;
591 case SAS_PROTO_SSP:
592 res = asd_build_ssp_ascb(a, t, gfp_flags);
593 break;
594 default:
595 asd_printk("unknown sas_task proto: 0x%x\n",
596 t->task_proto);
597 res = -ENOMEM;
598 break;
599 }
600 if (res)
601 goto out_err_unmap;
602 }
603 list_del_init(&alist);
604
605 res = asd_post_ascb_list(asd_ha, ascb, num);
606 if (unlikely(res)) {
607 a = NULL;
608 __list_add(&alist, ascb->list.prev, &ascb->list);
609 goto out_err_unmap;
610 }
611
612 return 0;
613out_err_unmap:
614 {
615 struct asd_ascb *b = a;
616 list_for_each_entry(a, &alist, list) {
617 if (a == b)
618 break;
619 t = a->uldd_task;
620 switch (t->task_proto) {
621 case SATA_PROTO:
622 case SAS_PROTO_STP:
623 asd_unbuild_ata_ascb(a);
624 break;
625 case SAS_PROTO_SMP:
626 asd_unbuild_smp_ascb(a);
627 break;
628 case SAS_PROTO_SSP:
629 asd_unbuild_ssp_ascb(a);
630 default:
631 break;
632 }
633 t->lldd_task = NULL;
634 }
635 }
636 list_del_init(&alist);
637out_err:
638 if (ascb)
639 asd_ascb_free_list(ascb);
640 asd_can_dequeue(asd_ha, num);
641 return res;
642}
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
new file mode 100644
index 000000000000..61234384503b
--- /dev/null
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -0,0 +1,636 @@
1/*
2 * Aic94xx Task Management Functions
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This file is part of the aic94xx driver.
10 *
11 * The aic94xx driver is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; version 2 of the
14 * License.
15 *
16 * The aic94xx driver is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with the aic94xx driver; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 */
26
27#include <linux/spinlock.h>
28#include "aic94xx.h"
29#include "aic94xx_sas.h"
30#include "aic94xx_hwi.h"
31
32/* ---------- Internal enqueue ---------- */
33
34static int asd_enqueue_internal(struct asd_ascb *ascb,
35 void (*tasklet_complete)(struct asd_ascb *,
36 struct done_list_struct *),
37 void (*timed_out)(unsigned long))
38{
39 int res;
40
41 ascb->tasklet_complete = tasklet_complete;
42 ascb->uldd_timer = 1;
43
44 ascb->timer.data = (unsigned long) ascb;
45 ascb->timer.function = timed_out;
46 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
47
48 add_timer(&ascb->timer);
49
50 res = asd_post_ascb_list(ascb->ha, ascb, 1);
51 if (unlikely(res))
52 del_timer(&ascb->timer);
53 return res;
54}
55
56static inline void asd_timedout_common(unsigned long data)
57{
58 struct asd_ascb *ascb = (void *) data;
59 struct asd_seq_data *seq = &ascb->ha->seq;
60 unsigned long flags;
61
62 spin_lock_irqsave(&seq->pend_q_lock, flags);
63 seq->pending--;
64 list_del_init(&ascb->list);
65 spin_unlock_irqrestore(&seq->pend_q_lock, flags);
66}
67
68/* ---------- CLEAR NEXUS ---------- */
69
70static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
71 struct done_list_struct *dl)
72{
73 ASD_DPRINTK("%s: here\n", __FUNCTION__);
74 if (!del_timer(&ascb->timer)) {
75 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
76 return;
77 }
78 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
79 ascb->uldd_task = (void *) (unsigned long) dl->opcode;
80 complete(&ascb->completion);
81}
82
83static void asd_clear_nexus_timedout(unsigned long data)
84{
85 struct asd_ascb *ascb = (void *) data;
86
87 ASD_DPRINTK("%s: here\n", __FUNCTION__);
88 asd_timedout_common(data);
89 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
90 complete(&ascb->completion);
91}
92
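/* The CLEAR NEXUS variants below share one skeleton: allocate a
 * single internal aSCB, set the CLEAR NEXUS opcode plus the
 * variant-specific fields, post it with a timeout handler, wait for
 * completion and map TC_NO_ERROR to TMF_RESP_FUNC_COMPLETE. */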
93#define CLEAR_NEXUS_PRE \
94 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
95 res = 1; \
96 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
97 if (!ascb) \
98 return -ENOMEM; \
99 \
100 scb = ascb->scb; \
101 scb->header.opcode = CLEAR_NEXUS
102
103#define CLEAR_NEXUS_POST \
104 ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
105 res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
106 asd_clear_nexus_timedout); \
107 if (res) \
108 goto out_err; \
109 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
110 wait_for_completion(&ascb->completion); \
111 res = (int) (unsigned long) ascb->uldd_task; \
112 if (res == TC_NO_ERROR) \
113 res = TMF_RESP_FUNC_COMPLETE; \
114out_err: \
115 asd_ascb_free(ascb); \
116 return res
117
118int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
119{
120 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
121 struct asd_ascb *ascb;
122 struct scb *scb;
123 int res;
124
125 CLEAR_NEXUS_PRE;
126 scb->clear_nexus.nexus = NEXUS_ADAPTER;
127 CLEAR_NEXUS_POST;
128}
129
130int asd_clear_nexus_port(struct asd_sas_port *port)
131{
132 struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
133 struct asd_ascb *ascb;
134 struct scb *scb;
135 int res;
136
137 CLEAR_NEXUS_PRE;
138 scb->clear_nexus.nexus = NEXUS_PORT;
139 scb->clear_nexus.conn_mask = port->phy_mask;
140 CLEAR_NEXUS_POST;
141}
142
143#if 0
144static int asd_clear_nexus_I_T(struct domain_device *dev)
145{
146 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
147 struct asd_ascb *ascb;
148 struct scb *scb;
149 int res;
150
151 CLEAR_NEXUS_PRE;
152 scb->clear_nexus.nexus = NEXUS_I_T;
153 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
154 if (dev->tproto)
155 scb->clear_nexus.flags |= SUSPEND_TX;
156 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
157 dev->lldd_dev);
158 CLEAR_NEXUS_POST;
159}
160#endif
161
162static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
163{
164 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
165 struct asd_ascb *ascb;
166 struct scb *scb;
167 int res;
168
169 CLEAR_NEXUS_PRE;
170 scb->clear_nexus.nexus = NEXUS_I_T_L;
171 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
172 if (dev->tproto)
173 scb->clear_nexus.flags |= SUSPEND_TX;
174 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
176 dev->lldd_dev);
177 CLEAR_NEXUS_POST;
178}
179
180static int asd_clear_nexus_tag(struct sas_task *task)
181{
182 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
183 struct asd_ascb *tascb = task->lldd_task;
184 struct asd_ascb *ascb;
185 struct scb *scb;
186 int res;
187
188 CLEAR_NEXUS_PRE;
189 scb->clear_nexus.nexus = NEXUS_TAG;
190 memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
191 scb->clear_nexus.ssp_task.tag = tascb->tag;
192 if (task->dev->tproto)
193 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
194 task->dev->lldd_dev);
195 CLEAR_NEXUS_POST;
196}
197
198static int asd_clear_nexus_index(struct sas_task *task)
199{
200 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
201 struct asd_ascb *tascb = task->lldd_task;
202 struct asd_ascb *ascb;
203 struct scb *scb;
204 int res;
205
206 CLEAR_NEXUS_PRE;
207 scb->clear_nexus.nexus = NEXUS_TRANS_CX;
208 if (task->dev->tproto)
209 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
210 task->dev->lldd_dev);
211 scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
212 CLEAR_NEXUS_POST;
213}
214
215/* ---------- TMFs ---------- */
216
217static void asd_tmf_timedout(unsigned long data)
218{
219 struct asd_ascb *ascb = (void *) data;
220
221 ASD_DPRINTK("tmf timed out\n");
222 asd_timedout_common(data);
223 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
224 complete(&ascb->completion);
225}
226
227static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
228 struct done_list_struct *dl)
229{
230 struct asd_ha_struct *asd_ha = ascb->ha;
231 unsigned long flags;
232 struct tc_resp_sb_struct {
233 __le16 index_escb;
234 u8 len_lsb;
235 u8 flags;
236 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
237
238 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
239 struct asd_ascb *escb;
240 struct asd_dma_tok *edb;
241 struct ssp_frame_hdr *fh;
242 struct ssp_response_iu *ru;
243 int res = TMF_RESP_FUNC_FAILED;
244
245 ASD_DPRINTK("tmf resp tasklet\n");
246
247 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
248 escb = asd_tc_index_find(&asd_ha->seq,
249 (int)le16_to_cpu(resp_sb->index_escb));
250 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
251
252 if (!escb) {
253 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
254 return res;
255 }
256
257 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
258 ascb->tag = *(__be16 *)(edb->vaddr+4);
259 fh = edb->vaddr + 16;
260 ru = edb->vaddr + 16 + sizeof(*fh);
261 res = ru->status;
262 if (ru->datapres == 1) /* Response data present */
263 res = ru->resp_data[3];
264#if 0
265 ascb->tag = fh->tag;
266#endif
267 ascb->tag_valid = 1;
268
269 asd_invalidate_edb(escb, edb_id);
270 return res;
271}
272
273static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
274 struct done_list_struct *dl)
275{
276 if (!del_timer(&ascb->timer))
277 return;
278
279 ASD_DPRINTK("tmf tasklet complete\n");
280
281 if (dl->opcode == TC_SSP_RESP)
282 ascb->uldd_task = (void *) (unsigned long)
283 asd_get_tmf_resp_tasklet(ascb, dl);
284 else
285 ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode;
286
287 complete(&ascb->completion);
288}
289
290static inline int asd_clear_nexus(struct sas_task *task)
291{
292 int res = TMF_RESP_FUNC_FAILED;
293 struct asd_ascb *tascb = task->lldd_task;
294 unsigned long flags;
295
296 ASD_DPRINTK("task not done, clearing nexus\n");
297 if (tascb->tag_valid)
298 res = asd_clear_nexus_tag(task);
299 else
300 res = asd_clear_nexus_index(task);
301 wait_for_completion_timeout(&tascb->completion,
302 AIC94XX_SCB_TIMEOUT);
303 ASD_DPRINTK("came back from clear nexus\n");
304 spin_lock_irqsave(&task->task_state_lock, flags);
305 if (task->task_state_flags & SAS_TASK_STATE_DONE)
306 res = TMF_RESP_FUNC_COMPLETE;
307 spin_unlock_irqrestore(&task->task_state_lock, flags);
308
309 return res;
310}
311
312/**
313 * asd_abort_task -- ABORT TASK TMF
314 * @task: the task to be aborted
315 *
316 * Before calling ABORT TASK the task state flags should be ORed with
317 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
318 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
319 *
320 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
321 * Returns: SAS TMF responses (see sas_task.h),
322 * -ENOMEM,
323 * -SAS_QUEUE_FULL.
324 *
325 * When ABORT TASK returns, the caller of ABORT TASK checks first the
326 * task->task_state_flags, and then the return value of ABORT TASK.
327 *
328 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
329 * task was completed successfully prior to it being aborted. The
330 * caller of ABORT TASK has responsibility to call task->task_done()
331 * xor free the task, depending on their framework. The return code
332 * is TMF_RESP_FUNC_FAILED in this case.
333 *
334 * Else the SAS_TASK_STATE_DONE bit is not set,
335 * If the return code is TMF_RESP_FUNC_COMPLETE, then
336 * the task was aborted successfully. The caller of
337 * ABORT TASK has responsibility to call task->task_done()
338 * to finish the task, xor free the task depending on their
339 * framework.
340 * else
341 * the ABORT TASK returned some kind of error. The task
342 * was _not_ cancelled. Nothing can be assumed.
343 * The caller of ABORT TASK may wish to retry.
344 */
345int asd_abort_task(struct sas_task *task)
346{
347 struct asd_ascb *tascb = task->lldd_task;
348 struct asd_ha_struct *asd_ha = tascb->ha;
349 int res = 1;
350 unsigned long flags;
351 struct asd_ascb *ascb = NULL;
352 struct scb *scb;
353
354 spin_lock_irqsave(&task->task_state_lock, flags);
355 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
356 spin_unlock_irqrestore(&task->task_state_lock, flags);
357 res = TMF_RESP_FUNC_COMPLETE;
358 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
359 goto out_done;
360 }
361 spin_unlock_irqrestore(&task->task_state_lock, flags);
362
363 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
364 if (!ascb)
365 return -ENOMEM;
366 scb = ascb->scb;
367
368 scb->header.opcode = ABORT_TASK;
369
370 switch (task->task_proto) {
371 case SATA_PROTO:
372 case SAS_PROTO_STP:
373 scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
374 break;
375 case SAS_PROTO_SSP:
376 scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
377 scb->abort_task.proto_conn_rate |= task->dev->linkrate;
378 break;
379 case SAS_PROTO_SMP:
380 break;
381 default:
382 break;
383 }
384
385 if (task->task_proto == SAS_PROTO_SSP) {
386 scb->abort_task.ssp_frame.frame_type = SSP_TASK;
387 memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
388 task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
389 memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
390 task->dev->port->ha->hashed_sas_addr,
391 HASHED_SAS_ADDR_SIZE);
392 scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
393
394 memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
395 scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
396 scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
397 }
398
399 scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
400 scb->abort_task.conn_handle = cpu_to_le16(
401 (u16)(unsigned long)task->dev->lldd_dev);
402 scb->abort_task.retry_count = 1;
403 scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
404 scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
405
406 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
407 asd_tmf_timedout);
408 if (res)
409 goto out;
410 wait_for_completion(&ascb->completion);
411 ASD_DPRINTK("tmf came back\n");
412
413 res = (int) (unsigned long) ascb->uldd_task;
414 tascb->tag = ascb->tag;
415 tascb->tag_valid = ascb->tag_valid;
416
417 spin_lock_irqsave(&task->task_state_lock, flags);
418 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
419 spin_unlock_irqrestore(&task->task_state_lock, flags);
420 res = TMF_RESP_FUNC_COMPLETE;
421 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
422 goto out_done;
423 }
424 spin_unlock_irqrestore(&task->task_state_lock, flags);
425
426 switch (res) {
427 /* The task to be aborted has been sent to the device.
428 * We got a Response IU for the ABORT TASK TMF. */
429 case TC_NO_ERROR + 0xFF00:
430 case TMF_RESP_FUNC_COMPLETE:
431 case TMF_RESP_FUNC_FAILED:
432 res = asd_clear_nexus(task);
433 break;
434 case TMF_RESP_INVALID_FRAME:
435 case TMF_RESP_OVERLAPPED_TAG:
436 case TMF_RESP_FUNC_ESUPP:
437 case TMF_RESP_NO_LUN:
438		goto out_done;
439 }
440 /* In the following we assume that the managing layer
441 * will _never_ make a mistake when issuing ABORT TASK.
442 */
443 switch (res) {
444 default:
445 res = asd_clear_nexus(task);
446 /* fallthrough */
447 case TC_NO_ERROR + 0xFF00:
448 case TMF_RESP_FUNC_COMPLETE:
449 break;
450 /* The task hasn't been sent to the device xor we never got
451 * a (sane) Response IU for the ABORT TASK TMF.
452 */
453 case TF_NAK_RECV + 0xFF00:
454 res = TMF_RESP_INVALID_FRAME;
455 break;
456 case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
457 res = TMF_RESP_FUNC_FAILED;
458 wait_for_completion_timeout(&tascb->completion,
459 AIC94XX_SCB_TIMEOUT);
460 spin_lock_irqsave(&task->task_state_lock, flags);
461 if (task->task_state_flags & SAS_TASK_STATE_DONE)
462 res = TMF_RESP_FUNC_COMPLETE;
463 spin_unlock_irqrestore(&task->task_state_lock, flags);
464 goto out_done;
465 case TF_TMF_NO_TAG + 0xFF00:
466 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
467 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
468 res = TMF_RESP_FUNC_COMPLETE;
469 goto out_done;
470 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
471 res = TMF_RESP_FUNC_ESUPP;
472 goto out;
473 }
474out_done:
475 if (res == TMF_RESP_FUNC_COMPLETE) {
476 task->lldd_task = NULL;
477 mb();
478 asd_ascb_free(tascb);
479 }
480out:
481 asd_ascb_free(ascb);
482 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
483 return res;
484}
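/*
 * Caller-side handling (an illustrative sketch, not part of this driver):
 * per the rules documented above, the managing layer re-checks the task
 * state before trusting the TMF return code, roughly:
 *
 *	res = asd_abort_task(task);
 *	spin_lock_irqsave(&task->task_state_lock, flags);
 *	done = task->task_state_flags & SAS_TASK_STATE_DONE;
 *	spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 * and only if "done" is set, or res is TMF_RESP_FUNC_COMPLETE, does it
 * call task->task_done() xor free the task; otherwise nothing can be
 * assumed and the abort may be retried.
 */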
485
486/**
487 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
488 * @dev: pointer to struct domain_device of interest
489 * @lun: pointer to u8[8] which is the LUN
490 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
491 * @index: the transaction context of the task to be queried if QT TMF
492 *
493 * This function is used to send ABORT TASK SET, CLEAR ACA,
494 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
495 *
496 * No SCBs should be queued to the I_T_L nexus when this SCB is
497 * pending.
498 *
499 * Returns: TMF response code (see sas_task.h or the SAS spec)
500 */
501static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
502 int tmf, int index)
503{
504 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
505 struct asd_ascb *ascb;
506 int res = 1;
507 struct scb *scb;
508
509 if (!(dev->tproto & SAS_PROTO_SSP))
510 return TMF_RESP_FUNC_ESUPP;
511
512 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
513 if (!ascb)
514 return -ENOMEM;
515 scb = ascb->scb;
516
517 if (tmf == TMF_QUERY_TASK)
518 scb->header.opcode = QUERY_SSP_TASK;
519 else
520 scb->header.opcode = INITIATE_SSP_TMF;
521
522 scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
523 scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
524 /* SSP frame header */
525 scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
526 memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
527 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
528 memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
529 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
530 scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
531 /* SSP Task IU */
532 memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
533 scb->ssp_tmf.ssp_task.tmf = tmf;
534
535 scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
536 scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
537 dev->lldd_dev);
538 scb->ssp_tmf.retry_count = 1;
539 scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
540 if (tmf == TMF_QUERY_TASK)
541 scb->ssp_tmf.index = cpu_to_le16(index);
542
543 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
544 asd_tmf_timedout);
545 if (res)
546 goto out_err;
547 wait_for_completion(&ascb->completion);
548 res = (int) (unsigned long) ascb->uldd_task;
549
550 switch (res) {
551 case TC_NO_ERROR + 0xFF00:
552 res = TMF_RESP_FUNC_COMPLETE;
553 break;
554 case TF_NAK_RECV + 0xFF00:
555 res = TMF_RESP_INVALID_FRAME;
556 break;
557 case TF_TMF_TASK_DONE + 0xFF00:
558 res = TMF_RESP_FUNC_FAILED;
559 break;
560 case TF_TMF_NO_TAG + 0xFF00:
561 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
562 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
563 res = TMF_RESP_FUNC_COMPLETE;
564 break;
565 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
566 res = TMF_RESP_FUNC_ESUPP;
567 break;
568 default:
569 ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n",
570 __FUNCTION__, res);
571 res = TMF_RESP_FUNC_FAILED;
572 break;
573 }
574out_err:
575 asd_ascb_free(ascb);
576 return res;
577}
578
579int asd_abort_task_set(struct domain_device *dev, u8 *lun)
580{
581 int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
582
583 if (res == TMF_RESP_FUNC_COMPLETE)
584 asd_clear_nexus_I_T_L(dev, lun);
585 return res;
586}
587
588int asd_clear_aca(struct domain_device *dev, u8 *lun)
589{
590 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
591
592 if (res == TMF_RESP_FUNC_COMPLETE)
593 asd_clear_nexus_I_T_L(dev, lun);
594 return res;
595}
596
597int asd_clear_task_set(struct domain_device *dev, u8 *lun)
598{
599 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
600
601 if (res == TMF_RESP_FUNC_COMPLETE)
602 asd_clear_nexus_I_T_L(dev, lun);
603 return res;
604}
605
606int asd_lu_reset(struct domain_device *dev, u8 *lun)
607{
608 int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
609
610 if (res == TMF_RESP_FUNC_COMPLETE)
611 asd_clear_nexus_I_T_L(dev, lun);
612 return res;
613}
614
615/**
616 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
617 * @task: pointer to struct sas_task of interest
618 *
619 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
620 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
621 *
622 * Normally the management layer sets the task to aborted state,
623 * and then calls query task and then abort task.
624 */
625int asd_query_task(struct sas_task *task)
626{
627 struct asd_ascb *ascb = task->lldd_task;
628 int index;
629
630 if (ascb) {
631 index = ascb->tc_index;
632 return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
633 TMF_QUERY_TASK, index);
634 }
635 return TMF_RESP_FUNC_COMPLETE;
636}
diff --git a/drivers/scsi/arcmsr/Makefile b/drivers/scsi/arcmsr/Makefile
new file mode 100644
index 000000000000..721aced39168
--- /dev/null
+++ b/drivers/scsi/arcmsr/Makefile
@@ -0,0 +1,6 @@
1# File: drivers/arcmsr/Makefile
2# Makefile for the ARECA PCI-X PCI-EXPRESS SATA RAID controllers SCSI driver.
3
4arcmsr-objs := arcmsr_attr.o arcmsr_hba.o
5
6obj-$(CONFIG_SCSI_ARCMSR) := arcmsr.o
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
new file mode 100644
index 000000000000..aff96db9ccf6
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -0,0 +1,472 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr.h
5** BY : Erich Chen
6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter
8*******************************************************************************
9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved.
10**
11** Web site: www.areca.com.tw
12** E-mail: erich@areca.com.tw
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License version 2 as
16** published by the Free Software Foundation.
17** This program is distributed in the hope that it will be useful,
18** but WITHOUT ANY WARRANTY; without even the implied warranty of
19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20** GNU General Public License for more details.
21*******************************************************************************
22** Redistribution and use in source and binary forms, with or without
23** modification, are permitted provided that the following conditions
24** are met:
25** 1. Redistributions of source code must retain the above copyright
26** notice, this list of conditions and the following disclaimer.
27** 2. Redistributions in binary form must reproduce the above copyright
28** notice, this list of conditions and the following disclaimer in the
29** documentation and/or other materials provided with the distribution.
30** 3. The name of the author may not be used to endorse or promote products
31** derived from this software without specific prior written permission.
32**
33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41**(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43*******************************************************************************
44*/
45#include <linux/interrupt.h>
46
47struct class_device_attribute;
48
49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 288
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13"
52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_TARGETID 17
55#define ARCMSR_MAX_TARGETLUN 8
56#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
57#define ARCMSR_MAX_QBUFFER 4096
58#define ARCMSR_MAX_SG_ENTRIES 38
59
60/*
61*******************************************************************************
62** split 64bits dma addressing
63*******************************************************************************
64*/
65#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
66#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
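/*
** illustrative use (cdb_phyaddr being a hypothetical dma_addr_t variable):
**	uint32_t hi = dma_addr_hi32(cdb_phyaddr);
**	uint32_t lo = dma_addr_lo32(cdb_phyaddr);
** the two 16-bit shifts in dma_addr_hi32 avoid an oversized-shift warning
** when dma_addr_t is only 32 bits wide
*/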
67/*
68*******************************************************************************
69** MESSAGE CONTROL CODE
70*******************************************************************************
71*/
72struct CMD_MESSAGE
73{
74 uint32_t HeaderLength;
75 uint8_t Signature[8];
76 uint32_t Timeout;
77 uint32_t ControlCode;
78 uint32_t ReturnCode;
79 uint32_t Length;
80};
81/*
82*******************************************************************************
83** IOP Message Transfer Data for user space
84*******************************************************************************
85*/
86struct CMD_MESSAGE_FIELD
87{
88 struct CMD_MESSAGE cmdmessage;
89 uint8_t messagedatabuffer[1032];
90};
91/* IOP message transfer */
92#define ARCMSR_MESSAGE_FAIL 0x0001
93/* DeviceType */
94#define ARECA_SATA_RAID 0x90000000
95/* FunctionCode */
96#define FUNCTION_READ_RQBUFFER 0x0801
97#define FUNCTION_WRITE_WQBUFFER 0x0802
98#define FUNCTION_CLEAR_RQBUFFER 0x0803
99#define FUNCTION_CLEAR_WQBUFFER 0x0804
100#define FUNCTION_CLEAR_ALLQBUFFER 0x0805
101#define FUNCTION_RETURN_CODE_3F 0x0806
102#define FUNCTION_SAY_HELLO 0x0807
103#define FUNCTION_SAY_GOODBYE 0x0808
104#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
105/* ARECA IO CONTROL CODE*/
106#define ARCMSR_MESSAGE_READ_RQBUFFER \
107 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
108#define ARCMSR_MESSAGE_WRITE_WQBUFFER \
109 ARECA_SATA_RAID | FUNCTION_WRITE_WQBUFFER
110#define ARCMSR_MESSAGE_CLEAR_RQBUFFER \
111 ARECA_SATA_RAID | FUNCTION_CLEAR_RQBUFFER
112#define ARCMSR_MESSAGE_CLEAR_WQBUFFER \
113 ARECA_SATA_RAID | FUNCTION_CLEAR_WQBUFFER
114#define ARCMSR_MESSAGE_CLEAR_ALLQBUFFER \
115 ARECA_SATA_RAID | FUNCTION_CLEAR_ALLQBUFFER
116#define ARCMSR_MESSAGE_RETURN_CODE_3F \
117 ARECA_SATA_RAID | FUNCTION_RETURN_CODE_3F
118#define ARCMSR_MESSAGE_SAY_HELLO \
119 ARECA_SATA_RAID | FUNCTION_SAY_HELLO
120#define ARCMSR_MESSAGE_SAY_GOODBYE \
121 ARECA_SATA_RAID | FUNCTION_SAY_GOODBYE
122#define ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE \
123 ARECA_SATA_RAID | FUNCTION_FLUSH_ADAPTER_CACHE
124/* ARECA IOCTL ReturnCode */
125#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
126#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
127#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
128/*
129*************************************************************
130** structure for holding DMA address data
131*************************************************************
132*/
133#define IS_SG64_ADDR 0x01000000 /* bit24 */
134struct SG32ENTRY
135{
136 uint32_t length;
137 uint32_t address;
138};
139struct SG64ENTRY
140{
141 uint32_t length;
142 uint32_t address;
143 uint32_t addresshigh;
144};
145struct SGENTRY_UNION
146{
147 union
148 {
149 struct SG32ENTRY sg32entry;
150 struct SG64ENTRY sg64entry;
151 }u;
152};
153/*
154********************************************************************
155** Q Buffer of IOP Message Transfer
156********************************************************************
157*/
158struct QBUFFER
159{
160 uint32_t data_len;
161 uint8_t data[124];
162};
163/*
164*******************************************************************************
165** FIRMWARE INFO
166*******************************************************************************
167*/
168struct FIRMWARE_INFO
169{
170 uint32_t signature; /*0, 00-03*/
171 uint32_t request_len; /*1, 04-07*/
172 uint32_t numbers_queue; /*2, 08-11*/
173 uint32_t sdram_size; /*3, 12-15*/
174 uint32_t ide_channels; /*4, 16-19*/
175 char vendor[40]; /*5, 20-59*/
176 char model[8]; /*15, 60-67*/
177 char firmware_ver[16]; /*17, 68-83*/
178 char device_map[16]; /*21, 84-99*/
179};
180/* signature of set and get firmware config */
181#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
182#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
183/* message code of inbound message register */
184#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
185#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
186#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
187#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
188#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
189#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
190#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
191#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
192#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
193/* doorbell interrupt generator */
194#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
195#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
196#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
197#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
198/* ccb areca cdb flag */
199#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
200#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
201#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
202#define ARCMSR_CCBREPLY_FLAG_ERROR 0x10000000
203/* outbound firmware ok */
204#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
205/*
206*******************************************************************************
207** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
208*******************************************************************************
209*/
210struct ARCMSR_CDB
211{
212 uint8_t Bus;
213 uint8_t TargetID;
214 uint8_t LUN;
215 uint8_t Function;
216
217 uint8_t CdbLength;
218 uint8_t sgcount;
219 uint8_t Flags;
220#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
221#define ARCMSR_CDB_FLAG_BIOS 0x02
222#define ARCMSR_CDB_FLAG_WRITE 0x04
223#define ARCMSR_CDB_FLAG_SIMPLEQ 0x00
224#define ARCMSR_CDB_FLAG_HEADQ 0x08
225#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
226 uint8_t Reserved1;
227
228 uint32_t Context;
229 uint32_t DataLength;
230
231 uint8_t Cdb[16];
232
233 uint8_t DeviceStatus;
234#define ARCMSR_DEV_CHECK_CONDITION 0x02
235#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
236#define ARCMSR_DEV_ABORTED 0xF1
237#define ARCMSR_DEV_INIT_FAIL 0xF2
238 uint8_t SenseData[15];
239
240 union
241 {
242 struct SG32ENTRY sg32entry[ARCMSR_MAX_SG_ENTRIES];
243 struct SG64ENTRY sg64entry[ARCMSR_MAX_SG_ENTRIES];
244 } u;
245};
246/*
247*******************************************************************************
248** Messaging Unit (MU) of the Intel R 80331 I/O processor (80331)
249*******************************************************************************
250*/
251struct MessageUnit
252{
253	uint32_t reserved0[4]; /*0000 000F*/
254 uint32_t inbound_msgaddr0; /*0010 0013*/
255 uint32_t inbound_msgaddr1; /*0014 0017*/
256 uint32_t outbound_msgaddr0; /*0018 001B*/
257 uint32_t outbound_msgaddr1; /*001C 001F*/
258 uint32_t inbound_doorbell; /*0020 0023*/
259 uint32_t inbound_intstatus; /*0024 0027*/
260 uint32_t inbound_intmask; /*0028 002B*/
261 uint32_t outbound_doorbell; /*002C 002F*/
262 uint32_t outbound_intstatus; /*0030 0033*/
263 uint32_t outbound_intmask; /*0034 0037*/
264 uint32_t reserved1[2]; /*0038 003F*/
265 uint32_t inbound_queueport; /*0040 0043*/
266 uint32_t outbound_queueport; /*0044 0047*/
267 uint32_t reserved2[2]; /*0048 004F*/
268 uint32_t reserved3[492]; /*0050 07FF 492*/
269 uint32_t reserved4[128]; /*0800 09FF 128*/
270 uint32_t message_rwbuffer[256]; /*0a00 0DFF 256*/
271 uint32_t message_wbuffer[32]; /*0E00 0E7F 32*/
272 uint32_t reserved5[32]; /*0E80 0EFF 32*/
273 uint32_t message_rbuffer[32]; /*0F00 0F7F 32*/
274 uint32_t reserved6[32]; /*0F80 0FFF 32*/
275};
276/*
277*******************************************************************************
278** Adapter Control Block
279*******************************************************************************
280*/
281struct AdapterControlBlock
282{
283 struct pci_dev * pdev;
284 struct Scsi_Host * host;
285 unsigned long vir2phy_offset;
286 /* Offset is used in making arc cdb physical to virtual calculations */
287 uint32_t outbound_int_enable;
288
289 struct MessageUnit __iomem * pmu;
290 /* message unit ATU inbound base address0 */
291
292 uint32_t acb_flags;
293#define ACB_F_SCSISTOPADAPTER 0x0001
294#define ACB_F_MSG_STOP_BGRB 0x0002
295 /* stop RAID background rebuild */
296#define ACB_F_MSG_START_BGRB 0x0004
297	/* start RAID background rebuild */
298#define ACB_F_IOPDATA_OVERFLOW 0x0008
299 /* iop message data rqbuffer overflow */
300#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
301 /* message clear wqbuffer */
302#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
303 /* message clear rqbuffer */
304#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
305#define ACB_F_BUS_RESET 0x0080
306#define ACB_F_IOP_INITED 0x0100
307 /* iop init */
308
309 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
310 /* used for memory free */
311 struct list_head ccb_free_list;
312 /* head of free ccb list */
313 atomic_t ccboutstandingcount;
314
315 void * dma_coherent;
316 /* dma_coherent used for memory free */
317 dma_addr_t dma_coherent_handle;
318 /* dma_coherent_handle used for memory free */
319
320 uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
321 /* data collection buffer for read from 80331 */
322 int32_t rqbuf_firstindex;
323 /* first of read buffer */
324 int32_t rqbuf_lastindex;
325 /* last of read buffer */
326 uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
327 /* data collection buffer for write to 80331 */
328 int32_t wqbuf_firstindex;
329 /* first of write buffer */
330 int32_t wqbuf_lastindex;
331 /* last of write buffer */
332 uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
333 /* id0 ..... id15, lun0...lun7 */
334#define ARECA_RAID_GONE 0x55
335#define ARECA_RAID_GOOD 0xaa
336 uint32_t num_resets;
337 uint32_t num_aborts;
338 uint32_t firm_request_len;
339 uint32_t firm_numbers_queue;
340 uint32_t firm_sdram_size;
341 uint32_t firm_hd_channels;
342 char firm_model[12];
343 char firm_version[20];
344};/* HW_DEVICE_EXTENSION */
345/*
346*******************************************************************************
347** Command Control Block
348** the CCB length must be a multiple of 32 bytes
349*******************************************************************************
350*/
351struct CommandControlBlock
352{
353 struct ARCMSR_CDB arcmsr_cdb;
354 /*
355 ** 0-503 (size of CDB=504):
356 ** arcmsr messenger scsi command descriptor size 504 bytes
357 */
358 uint32_t cdb_shifted_phyaddr;
359 /* 504-507 */
360 uint32_t reserved1;
361 /* 508-511 */
362#if BITS_PER_LONG == 64
363 /* ======================512+64 bytes======================== */
364 struct list_head list;
365 /* 512-527 16 bytes next/prev ptrs for ccb lists */
366 struct scsi_cmnd * pcmd;
367 /* 528-535 8 bytes pointer of linux scsi command */
368 struct AdapterControlBlock * acb;
369 /* 536-543 8 bytes pointer of acb */
370
371 uint16_t ccb_flags;
372 /* 544-545 */
373 #define CCB_FLAG_READ 0x0000
374 #define CCB_FLAG_WRITE 0x0001
375 #define CCB_FLAG_ERROR 0x0002
376 #define CCB_FLAG_FLUSHCACHE 0x0004
377 #define CCB_FLAG_MASTER_ABORTED 0x0008
378 uint16_t startdone;
379 /* 546-547 */
380 #define ARCMSR_CCB_DONE 0x0000
381 #define ARCMSR_CCB_START 0x55AA
382 #define ARCMSR_CCB_ABORTED 0xAA55
383 #define ARCMSR_CCB_ILLEGAL 0xFFFF
384 uint32_t reserved2[7];
385 /* 548-551 552-555 556-559 560-563 564-567 568-571 572-575 */
386#else
387 /* ======================512+32 bytes======================== */
388 struct list_head list;
389 /* 512-519 8 bytes next/prev ptrs for ccb lists */
390 struct scsi_cmnd * pcmd;
391 /* 520-523 4 bytes pointer of linux scsi command */
392 struct AdapterControlBlock * acb;
393 /* 524-527 4 bytes pointer of acb */
394
395 uint16_t ccb_flags;
396 /* 528-529 */
397 #define CCB_FLAG_READ 0x0000
398 #define CCB_FLAG_WRITE 0x0001
399 #define CCB_FLAG_ERROR 0x0002
400 #define CCB_FLAG_FLUSHCACHE 0x0004
401 #define CCB_FLAG_MASTER_ABORTED 0x0008
402 uint16_t startdone;
403 /* 530-531 */
404 #define ARCMSR_CCB_DONE 0x0000
405 #define ARCMSR_CCB_START 0x55AA
406 #define ARCMSR_CCB_ABORTED 0xAA55
407 #define ARCMSR_CCB_ILLEGAL 0xFFFF
408 uint32_t reserved2[3];
409 /* 532-535 536-539 540-543 */
410#endif
411 /* ========================================================== */
412};
413/*
414*******************************************************************************
415** ARECA SCSI sense data
416*******************************************************************************
417*/
418struct SENSE_DATA
419{
420 uint8_t ErrorCode:7;
421#define SCSI_SENSE_CURRENT_ERRORS 0x70
422#define SCSI_SENSE_DEFERRED_ERRORS 0x71
423 uint8_t Valid:1;
424 uint8_t SegmentNumber;
425 uint8_t SenseKey:4;
426 uint8_t Reserved:1;
427 uint8_t IncorrectLength:1;
428 uint8_t EndOfMedia:1;
429 uint8_t FileMark:1;
430 uint8_t Information[4];
431 uint8_t AdditionalSenseLength;
432 uint8_t CommandSpecificInformation[4];
433 uint8_t AdditionalSenseCode;
434 uint8_t AdditionalSenseCodeQualifier;
435 uint8_t FieldReplaceableUnitCode;
436 uint8_t SenseKeySpecific[3];
437};
438/*
439*******************************************************************************
440** Outbound Interrupt Status Register - OISR
441*******************************************************************************
442*/
443#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
444#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
445#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
446#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
447#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
448#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
449#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
450 (ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
451 |ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
452 |ARCMSR_MU_OUTBOUND_DOORBELL_INT \
453 |ARCMSR_MU_OUTBOUND_POSTQUEUE_INT \
454 |ARCMSR_MU_OUTBOUND_PCI_INT)
455/*
456*******************************************************************************
457** Outbound Interrupt Mask Register - OIMR
458*******************************************************************************
459*/
460#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
461#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
462#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
463#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
464#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
465#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
466#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
467
468extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
469extern struct class_device_attribute *arcmsr_host_attrs[];
470extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb);
471void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
472
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
new file mode 100644
index 000000000000..12497da5529d
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -0,0 +1,381 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr_attr.c
5** BY : Erich Chen
6** Description: attributes exported to sysfs and device host
7*******************************************************************************
8** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
9**
10** Web site: www.areca.com.tw
11** E-mail: erich@areca.com.tw
12**
13** This program is free software; you can redistribute it and/or modify
14** it under the terms of the GNU General Public License version 2 as
15** published by the Free Software Foundation.
16** This program is distributed in the hope that it will be useful,
17** but WITHOUT ANY WARRANTY; without even the implied warranty of
18** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19** GNU General Public License for more details.
20*******************************************************************************
21** Redistribution and use in source and binary forms, with or without
22** modification, are permitted provided that the following conditions
23** are met:
24** 1. Redistributions of source code must retain the above copyright
25** notice, this list of conditions and the following disclaimer.
26** 2. Redistributions in binary form must reproduce the above copyright
27** notice, this list of conditions and the following disclaimer in the
28** documentation and/or other materials provided with the distribution.
29** 3. The name of the author may not be used to endorse or promote products
30** derived from this software without specific prior written permission.
31**
32** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
33** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
34** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
35** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
36** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
37** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
39** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
41** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42*******************************************************************************
43** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
44** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
45*******************************************************************************
46*/
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/errno.h>
51#include <linux/delay.h>
52#include <linux/pci.h>
53
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport.h>
58#include "arcmsr.h"
59
60struct class_device_attribute *arcmsr_host_attrs[];
61
62static ssize_t
63arcmsr_sysfs_iop_message_read(struct kobject *kobj, char *buf, loff_t off,
64 size_t count)
65{
66 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
67 struct Scsi_Host *host = class_to_shost(cdev);
68 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
69 struct MessageUnit __iomem *reg = acb->pmu;
70 uint8_t *pQbuffer,*ptmpQbuffer;
71 int32_t allxfer_len = 0;
72
73 if (!capable(CAP_SYS_ADMIN))
74 return -EACCES;
75
76 /* do message unit read. */
77 ptmpQbuffer = (uint8_t *)buf;
78 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
79 && (allxfer_len < 1031)) {
80 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
81 memcpy(ptmpQbuffer, pQbuffer, 1);
82 acb->rqbuf_firstindex++;
83 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
84 ptmpQbuffer++;
85 allxfer_len++;
86 }
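	/* if the IOP flagged an rqbuffer overflow earlier, drain its
	 * message_rbuffer into the driver's rqbuffer now and acknowledge
	 * with a "data read ok" doorbell so the IOP may post more data
	 */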
87 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
88 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
89 &reg->message_rbuffer;
90 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
91 int32_t iop_len;
92
93 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
94 iop_len = readl(&prbuffer->data_len);
95 while (iop_len > 0) {
96 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
97 acb->rqbuf_lastindex++;
98 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
99 iop_data++;
100 iop_len--;
101 }
102 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
103 &reg->inbound_doorbell);
104 }
105 return (allxfer_len);
106}
107
108static ssize_t
109arcmsr_sysfs_iop_message_write(struct kobject *kobj, char *buf, loff_t off,
110 size_t count)
111{
112 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
113 struct Scsi_Host *host = class_to_shost(cdev);
114 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
115 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
116 uint8_t *pQbuffer, *ptmpuserbuffer;
117
118 if (!capable(CAP_SYS_ADMIN))
119 return -EACCES;
120 if (count > 1032)
121 return -EINVAL;
122 /* do message unit write. */
123 ptmpuserbuffer = (uint8_t *)buf;
124 user_len = (int32_t)count;
125 wqbuf_lastindex = acb->wqbuf_lastindex;
126 wqbuf_firstindex = acb->wqbuf_firstindex;
127 if (wqbuf_lastindex != wqbuf_firstindex) {
128 arcmsr_post_Qbuffer(acb);
129 return 0; /*need retry*/
130 } else {
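		/* free space left in the circular wqbuffer is
		 * (firstindex - lastindex - 1) mod ARCMSR_MAX_QBUFFER,
		 * computed with a mask since the size is a power of two
		 */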
131 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
132 &(ARCMSR_MAX_QBUFFER - 1);
133 if (my_empty_len >= user_len) {
134 while (user_len > 0) {
135 pQbuffer =
136 &acb->wqbuffer[acb->wqbuf_lastindex];
137 memcpy(pQbuffer, ptmpuserbuffer, 1);
138 acb->wqbuf_lastindex++;
139 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
140 ptmpuserbuffer++;
141 user_len--;
142 }
143 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
144 acb->acb_flags &=
145 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
146 arcmsr_post_Qbuffer(acb);
147 }
148 return count;
149 } else {
150 return 0; /*need retry*/
151 }
152 }
153}
154
155static ssize_t
156arcmsr_sysfs_iop_message_clear(struct kobject *kobj, char *buf, loff_t off,
157 size_t count)
158{
159 struct class_device *cdev = container_of(kobj,struct class_device,kobj);
160 struct Scsi_Host *host = class_to_shost(cdev);
161 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
162 struct MessageUnit __iomem *reg = acb->pmu;
163 uint8_t *pQbuffer;
164
165 if (!capable(CAP_SYS_ADMIN))
166 return -EACCES;
167
168 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
169 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
170 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
171 , &reg->inbound_doorbell);
172 }
173 acb->acb_flags |=
174 (ACB_F_MESSAGE_WQBUFFER_CLEARED
175 | ACB_F_MESSAGE_RQBUFFER_CLEARED
176 | ACB_F_MESSAGE_WQBUFFER_READED);
177 acb->rqbuf_firstindex = 0;
178 acb->rqbuf_lastindex = 0;
179 acb->wqbuf_firstindex = 0;
180 acb->wqbuf_lastindex = 0;
181 pQbuffer = acb->rqbuffer;
182 memset(pQbuffer, 0, sizeof (struct QBUFFER));
183 pQbuffer = acb->wqbuffer;
184 memset(pQbuffer, 0, sizeof (struct QBUFFER));
185 return 1;
186}
187
188static struct bin_attribute arcmsr_sysfs_message_read_attr = {
189 .attr = {
190 .name = "mu_read",
191 .mode = S_IRUSR ,
192 .owner = THIS_MODULE,
193 },
194 .size = 1032,
195 .read = arcmsr_sysfs_iop_message_read,
196};
197
198static struct bin_attribute arcmsr_sysfs_message_write_attr = {
199 .attr = {
200 .name = "mu_write",
201 .mode = S_IWUSR,
202 .owner = THIS_MODULE,
203 },
204 .size = 1032,
205 .write = arcmsr_sysfs_iop_message_write,
206};
207
208static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
209 .attr = {
210 .name = "mu_clear",
211 .mode = S_IWUSR,
212 .owner = THIS_MODULE,
213 },
214 .size = 1,
215 .write = arcmsr_sysfs_iop_message_clear,
216};
217
218int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
219{
220 struct Scsi_Host *host = acb->host;
221 int error;
222
223 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
224 &arcmsr_sysfs_message_read_attr);
225 if (error) {
226 printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
227 goto error_bin_file_message_read;
228 }
229 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
230 &arcmsr_sysfs_message_write_attr);
231 if (error) {
232 printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
233 goto error_bin_file_message_write;
234 }
235 error = sysfs_create_bin_file(&host->shost_classdev.kobj,
236 &arcmsr_sysfs_message_clear_attr);
237 if (error) {
238 printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
239 goto error_bin_file_message_clear;
240 }
241 return 0;
242error_bin_file_message_clear:
243 sysfs_remove_bin_file(&host->shost_classdev.kobj,
244 &arcmsr_sysfs_message_write_attr);
245error_bin_file_message_write:
246 sysfs_remove_bin_file(&host->shost_classdev.kobj,
247 &arcmsr_sysfs_message_read_attr);
248error_bin_file_message_read:
249 return error;
250}
251
252void
253arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb) {
254 struct Scsi_Host *host = acb->host;
255
256 sysfs_remove_bin_file(&host->shost_classdev.kobj,
257 &arcmsr_sysfs_message_clear_attr);
258 sysfs_remove_bin_file(&host->shost_classdev.kobj,
259 &arcmsr_sysfs_message_write_attr);
260 sysfs_remove_bin_file(&host->shost_classdev.kobj,
261 &arcmsr_sysfs_message_read_attr);
262}
263
264
265static ssize_t
266arcmsr_attr_host_driver_version(struct class_device *cdev, char *buf) {
267 return snprintf(buf, PAGE_SIZE,
268 "%s\n",
269 ARCMSR_DRIVER_VERSION);
270}
271
272static ssize_t
273arcmsr_attr_host_driver_posted_cmd(struct class_device *cdev, char *buf) {
274 struct Scsi_Host *host = class_to_shost(cdev);
275 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
276 return snprintf(buf, PAGE_SIZE,
277 "%4d\n",
278 atomic_read(&acb->ccboutstandingcount));
279}
280
281static ssize_t
282arcmsr_attr_host_driver_reset(struct class_device *cdev, char *buf) {
283 struct Scsi_Host *host = class_to_shost(cdev);
284 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
285 return snprintf(buf, PAGE_SIZE,
286 "%4d\n",
287 acb->num_resets);
288}
289
290static ssize_t
291arcmsr_attr_host_driver_abort(struct class_device *cdev, char *buf) {
292 struct Scsi_Host *host = class_to_shost(cdev);
293 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
294 return snprintf(buf, PAGE_SIZE,
295 "%4d\n",
296 acb->num_aborts);
297}
298
299static ssize_t
300arcmsr_attr_host_fw_model(struct class_device *cdev, char *buf) {
301 struct Scsi_Host *host = class_to_shost(cdev);
302 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
303 return snprintf(buf, PAGE_SIZE,
304 "%s\n",
305 acb->firm_model);
306}
307
308static ssize_t
309arcmsr_attr_host_fw_version(struct class_device *cdev, char *buf) {
310 struct Scsi_Host *host = class_to_shost(cdev);
311 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
312
313 return snprintf(buf, PAGE_SIZE,
314 "%s\n",
315 acb->firm_version);
316}
317
318static ssize_t
319arcmsr_attr_host_fw_request_len(struct class_device *cdev, char *buf) {
320 struct Scsi_Host *host = class_to_shost(cdev);
321 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
322
323 return snprintf(buf, PAGE_SIZE,
324 "%4d\n",
325 acb->firm_request_len);
326}
327
328static ssize_t
329arcmsr_attr_host_fw_numbers_queue(struct class_device *cdev, char *buf) {
330 struct Scsi_Host *host = class_to_shost(cdev);
331 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
332
333 return snprintf(buf, PAGE_SIZE,
334 "%4d\n",
335 acb->firm_numbers_queue);
336}
337
338static ssize_t
339arcmsr_attr_host_fw_sdram_size(struct class_device *cdev, char *buf) {
340 struct Scsi_Host *host = class_to_shost(cdev);
341 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
342
343 return snprintf(buf, PAGE_SIZE,
344 "%4d\n",
345 acb->firm_sdram_size);
346}
347
348static ssize_t
349arcmsr_attr_host_fw_hd_channels(struct class_device *cdev, char *buf) {
350 struct Scsi_Host *host = class_to_shost(cdev);
351 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
352
353 return snprintf(buf, PAGE_SIZE,
354 "%4d\n",
355 acb->firm_hd_channels);
356}
357
358static CLASS_DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
359static CLASS_DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
360static CLASS_DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
361static CLASS_DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
362static CLASS_DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
363static CLASS_DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
364static CLASS_DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
365static CLASS_DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
366static CLASS_DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
367static CLASS_DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
368
369struct class_device_attribute *arcmsr_host_attrs[] = {
370 &class_device_attr_host_driver_version,
371 &class_device_attr_host_driver_posted_cmd,
372 &class_device_attr_host_driver_reset,
373 &class_device_attr_host_driver_abort,
374 &class_device_attr_host_fw_model,
375 &class_device_attr_host_fw_version,
376 &class_device_attr_host_fw_request_len,
377 &class_device_attr_host_fw_numbers_queue,
378 &class_device_attr_host_fw_sdram_size,
379 &class_device_attr_host_fw_hd_channels,
380 NULL,
381};
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
new file mode 100644
index 000000000000..475f978ff8f0
--- /dev/null
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -0,0 +1,1496 @@
1/*
2*******************************************************************************
3** O.S : Linux
4** FILE NAME : arcmsr_hba.c
5** BY : Erich Chen
6** Description: SCSI RAID Device Driver for
7** ARECA RAID Host adapter
8*******************************************************************************
9** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
10**
11** Web site: www.areca.com.tw
12** E-mail: erich@areca.com.tw
13**
14** This program is free software; you can redistribute it and/or modify
15** it under the terms of the GNU General Public License version 2 as
16** published by the Free Software Foundation.
17** This program is distributed in the hope that it will be useful,
18** but WITHOUT ANY WARRANTY; without even the implied warranty of
19** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20** GNU General Public License for more details.
21*******************************************************************************
22** Redistribution and use in source and binary forms, with or without
23** modification, are permitted provided that the following conditions
24** are met:
25** 1. Redistributions of source code must retain the above copyright
26** notice, this list of conditions and the following disclaimer.
27** 2. Redistributions in binary form must reproduce the above copyright
28** notice, this list of conditions and the following disclaimer in the
29** documentation and/or other materials provided with the distribution.
30** 3. The name of the author may not be used to endorse or promote products
31** derived from this software without specific prior written permission.
32**
33** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
38** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43*******************************************************************************
44** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46*******************************************************************************
47*/
48#include <linux/module.h>
49#include <linux/reboot.h>
50#include <linux/spinlock.h>
51#include <linux/pci_ids.h>
52#include <linux/interrupt.h>
53#include <linux/moduleparam.h>
54#include <linux/errno.h>
55#include <linux/types.h>
56#include <linux/delay.h>
57#include <linux/dma-mapping.h>
58#include <linux/timer.h>
59#include <linux/pci.h>
60#include <asm/dma.h>
61#include <asm/io.h>
62#include <asm/system.h>
63#include <asm/uaccess.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi.h>
66#include <scsi/scsi_cmnd.h>
67#include <scsi/scsi_tcq.h>
68#include <scsi/scsi_device.h>
69#include <scsi/scsi_transport.h>
70#include <scsi/scsicam.h>
71#include "arcmsr.h"
72
73MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
74MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
75MODULE_LICENSE("Dual BSD/GPL");
76MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
78static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
79static int arcmsr_abort(struct scsi_cmnd *);
80static int arcmsr_bus_reset(struct scsi_cmnd *);
81static int arcmsr_bios_param(struct scsi_device *sdev,
82 struct block_device *bdev, sector_t capacity, int *info);
83static int arcmsr_queue_command(struct scsi_cmnd * cmd,
84 void (*done) (struct scsi_cmnd *));
85static int arcmsr_probe(struct pci_dev *pdev,
86 const struct pci_device_id *id);
87static void arcmsr_remove(struct pci_dev *pdev);
88static void arcmsr_shutdown(struct pci_dev *pdev);
89static void arcmsr_iop_init(struct AdapterControlBlock *acb);
90static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
91static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
92static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
93static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
94static const char *arcmsr_info(struct Scsi_Host *);
95static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
96
97static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
98{
99 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
100 queue_depth = ARCMSR_MAX_CMD_PERLUN;
101 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
102 return queue_depth;
103}
104
105static struct scsi_host_template arcmsr_scsi_host_template = {
106 .module = THIS_MODULE,
107 .name = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
108 .info = arcmsr_info,
109 .queuecommand = arcmsr_queue_command,
110 .eh_abort_handler = arcmsr_abort,
111 .eh_bus_reset_handler = arcmsr_bus_reset,
112 .bios_param = arcmsr_bios_param,
113 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
114 .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
115 .this_id = ARCMSR_SCSI_INITIATOR_ID,
116 .sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
117 .max_sectors = ARCMSR_MAX_XFER_SECTORS,
118 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
119 .use_clustering = ENABLE_CLUSTERING,
120 .shost_attrs = arcmsr_host_attrs,
121};
122
123static struct pci_device_id arcmsr_device_id_table[] = {
124 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
125 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
126 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
127 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
128 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
129 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
130 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
131 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
132 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
133 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
134 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
135 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
136 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
137 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
138 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
139 {0, 0}, /* Terminating entry */
140};
141MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
142static struct pci_driver arcmsr_pci_driver = {
143 .name = "arcmsr",
144 .id_table = arcmsr_device_id_table,
145 .probe = arcmsr_probe,
146 .remove = arcmsr_remove,
147 .shutdown = arcmsr_shutdown
148};
149
150static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id,
151 struct pt_regs *regs)
152{
153 irqreturn_t handle_state;
154 struct AdapterControlBlock *acb;
155 unsigned long flags;
156
157 acb = (struct AdapterControlBlock *)dev_id;
158
159 spin_lock_irqsave(acb->host->host_lock, flags);
160 handle_state = arcmsr_interrupt(acb);
161 spin_unlock_irqrestore(acb->host->host_lock, flags);
162 return handle_state;
163}
164
165static int arcmsr_bios_param(struct scsi_device *sdev,
166 struct block_device *bdev, sector_t capacity, int *geom)
167{
168 int ret, heads, sectors, cylinders, total_capacity;
169 unsigned char *buffer;/* return copy of block device's partition table */
170
171 buffer = scsi_bios_ptable(bdev);
172 if (buffer) {
173 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
174 kfree(buffer);
175 if (ret != -1)
176 return ret;
177 }
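	/* no usable partition table: fall back to a 64-head/32-sector
	 * geometry, or to 255/63 once that would need more than 1024
	 * cylinders
	 */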
178 total_capacity = capacity;
179 heads = 64;
180 sectors = 32;
181 cylinders = total_capacity / (heads * sectors);
182 if (cylinders > 1024) {
183 heads = 255;
184 sectors = 63;
185 cylinders = total_capacity / (heads * sectors);
186 }
187 geom[0] = heads;
188 geom[1] = sectors;
189 geom[2] = cylinders;
190 return 0;
191}
192
193static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
194{
195 struct pci_dev *pdev = acb->pdev;
196 struct MessageUnit __iomem *reg = acb->pmu;
197 u32 ccb_phyaddr_hi32;
198 void *dma_coherent;
199 dma_addr_t dma_coherent_handle, dma_addr;
200 struct CommandControlBlock *ccb_tmp;
201 int i, j;
202
203 dma_coherent = dma_alloc_coherent(&pdev->dev,
204 ARCMSR_MAX_FREECCB_NUM *
205 sizeof (struct CommandControlBlock) + 0x20,
206 &dma_coherent_handle, GFP_KERNEL);
207 if (!dma_coherent)
208 return -ENOMEM;
209
210 acb->dma_coherent = dma_coherent;
211 acb->dma_coherent_handle = dma_coherent_handle;
212
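	/* round the pool up to the next 32-byte boundary: the firmware is
	 * handed cdb_shifted_phyaddr (physical address >> 5), so every CCB
	 * must start on a 32-byte aligned address
	 */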
213 if (((unsigned long)dma_coherent & 0x1F)) {
214 dma_coherent = dma_coherent +
215 (0x20 - ((unsigned long)dma_coherent & 0x1F));
216 dma_coherent_handle = dma_coherent_handle +
217 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
218 }
219
220 dma_addr = dma_coherent_handle;
221 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
222 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
223 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
224 ccb_tmp->acb = acb;
225 acb->pccb_pool[i] = ccb_tmp;
226 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
227 dma_addr = dma_addr + sizeof (struct CommandControlBlock);
228 ccb_tmp++;
229 }
230
231 acb->vir2phy_offset = (unsigned long)ccb_tmp -
232 (unsigned long)dma_addr;
233 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
234 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
235 acb->devstate[i][j] = ARECA_RAID_GOOD;
236
237 /*
238 ** here we need to tell iop 331 our ccb_tmp.HighPart
239 ** if ccb_tmp.HighPart is not zero
240 */
241 ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
242 if (ccb_phyaddr_hi32 != 0) {
243 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
244 writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
245 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
246 if (arcmsr_wait_msgint_ready(acb))
247 printk(KERN_NOTICE "arcmsr%d: "
248 "'set ccb high part physical address' timeout\n",
249 acb->host->host_no);
250 }
251
252 writel(readl(&reg->outbound_intmask) |
253 ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
254 &reg->outbound_intmask);
255 return 0;
256}
257
258static int arcmsr_probe(struct pci_dev *pdev,
259 const struct pci_device_id *id)
260{
261 struct Scsi_Host *host;
262 struct AdapterControlBlock *acb;
263 uint8_t bus, dev_fun;
264 int error;
265
266 error = pci_enable_device(pdev);
267 if (error)
268 goto out;
269 pci_set_master(pdev);
270
271 host = scsi_host_alloc(&arcmsr_scsi_host_template,
272 sizeof(struct AdapterControlBlock));
273 if (!host) {
274 error = -ENOMEM;
275 goto out_disable_device;
276 }
277 acb = (struct AdapterControlBlock *)host->hostdata;
278 memset(acb, 0, sizeof (struct AdapterControlBlock));
279
280 error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
281 if (error) {
282 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
283 if (error) {
284 printk(KERN_WARNING
285 "scsi%d: No suitable DMA mask available\n",
286 host->host_no);
287 goto out_host_put;
288 }
289 }
290 bus = pdev->bus->number;
291 dev_fun = pdev->devfn;
292 acb->host = host;
293 acb->pdev = pdev;
294 host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
295 host->max_lun = ARCMSR_MAX_TARGETLUN;
296 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
297	host->max_cmd_len = 16;	/* 16-byte CDBs are needed for 64-bit LBA (disks over 2 TB) */
298 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
299 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
300 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
301 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
302 host->unique_id = (bus << 8) | dev_fun;
303 host->irq = pdev->irq;
304 error = pci_request_regions(pdev, "arcmsr");
305 if (error)
306 goto out_host_put;
307
308 acb->pmu = ioremap(pci_resource_start(pdev, 0),
309 pci_resource_len(pdev, 0));
310 if (!acb->pmu) {
311 printk(KERN_NOTICE "arcmsr%d: memory"
312 " mapping region fail \n", acb->host->host_no);
313 goto out_release_regions;
314 }
315 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
316 ACB_F_MESSAGE_RQBUFFER_CLEARED |
317 ACB_F_MESSAGE_WQBUFFER_READED);
318 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
319 INIT_LIST_HEAD(&acb->ccb_free_list);
320
321 error = arcmsr_alloc_ccb_pool(acb);
322 if (error)
323 goto out_iounmap;
324
325 error = request_irq(pdev->irq, arcmsr_do_interrupt,
326 SA_INTERRUPT | SA_SHIRQ, "arcmsr", acb);
327 if (error)
328 goto out_free_ccb_pool;
329
330 arcmsr_iop_init(acb);
331 pci_set_drvdata(pdev, host);
332
333 error = scsi_add_host(host, &pdev->dev);
334 if (error)
335 goto out_free_irq;
336
337 error = arcmsr_alloc_sysfs_attr(acb);
338 if (error)
339 goto out_free_sysfs;
340
341 scsi_scan_host(host);
342 return 0;
343 out_free_sysfs:
	scsi_remove_host(host);
344 out_free_irq:
345 free_irq(pdev->irq, acb);
346 out_free_ccb_pool:
347 arcmsr_free_ccb_pool(acb);
348 out_iounmap:
349 iounmap(acb->pmu);
350 out_release_regions:
351 pci_release_regions(pdev);
352 out_host_put:
353 scsi_host_put(host);
354 out_disable_device:
355 pci_disable_device(pdev);
356 out:
357 return error;
358}
359
360static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
361{
362 struct MessageUnit __iomem *reg = acb->pmu;
363
364 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
365 if (arcmsr_wait_msgint_ready(acb))
366 printk(KERN_NOTICE
367 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
368 , acb->host->host_no);
369}
370
371static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
372{
373 struct AdapterControlBlock *acb = ccb->acb;
374 struct scsi_cmnd *pcmd = ccb->pcmd;
375
376 if (pcmd->use_sg != 0) {
377 struct scatterlist *sl;
378
379 sl = (struct scatterlist *)pcmd->request_buffer;
380 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
381 }
382 else if (pcmd->request_bufflen != 0)
383 pci_unmap_single(acb->pdev,
384 pcmd->SCp.dma_handle,
385 pcmd->request_bufflen, pcmd->sc_data_direction);
386}
387
388static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
389{
390 struct AdapterControlBlock *acb = ccb->acb;
391 struct scsi_cmnd *pcmd = ccb->pcmd;
392
393 arcmsr_pci_unmap_dma(ccb);
394 if (stand_flag == 1)
395 atomic_dec(&acb->ccboutstandingcount);
396 ccb->startdone = ARCMSR_CCB_DONE;
397 ccb->ccb_flags = 0;
398 list_add_tail(&ccb->list, &acb->ccb_free_list);
399 pcmd->scsi_done(pcmd);
400}
401
402static void arcmsr_remove(struct pci_dev *pdev)
403{
404 struct Scsi_Host *host = pci_get_drvdata(pdev);
405 struct AdapterControlBlock *acb =
406 (struct AdapterControlBlock *) host->hostdata;
407 struct MessageUnit __iomem *reg = acb->pmu;
408 int poll_count = 0;
409
410 arcmsr_free_sysfs_attr(acb);
411 scsi_remove_host(host);
412 arcmsr_stop_adapter_bgrb(acb);
413 arcmsr_flush_adapter_cache(acb);
414 writel(readl(&reg->outbound_intmask) |
415 ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
416 &reg->outbound_intmask);
417 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
418 acb->acb_flags &= ~ACB_F_IOP_INITED;
419
420 for (poll_count = 0; poll_count < 256; poll_count++) {
421 if (!atomic_read(&acb->ccboutstandingcount))
422 break;
423 arcmsr_interrupt(acb);
424 msleep(25);
425 }
426
427 if (atomic_read(&acb->ccboutstandingcount)) {
428 int i;
429
430 arcmsr_abort_allcmd(acb);
431 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
432 readl(&reg->outbound_queueport);
433 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
434 struct CommandControlBlock *ccb = acb->pccb_pool[i];
435 if (ccb->startdone == ARCMSR_CCB_START) {
436 ccb->startdone = ARCMSR_CCB_ABORTED;
437 ccb->pcmd->result = DID_ABORT << 16;
438 arcmsr_ccb_complete(ccb, 1);
439 }
440 }
441 }
442
443 free_irq(pdev->irq, acb);
444 iounmap(acb->pmu);
445 arcmsr_free_ccb_pool(acb);
446 pci_release_regions(pdev);
447
448 scsi_host_put(host);
449
450 pci_disable_device(pdev);
451 pci_set_drvdata(pdev, NULL);
452}
453
454static void arcmsr_shutdown(struct pci_dev *pdev)
455{
456 struct Scsi_Host *host = pci_get_drvdata(pdev);
457 struct AdapterControlBlock *acb =
458 (struct AdapterControlBlock *)host->hostdata;
459
460 arcmsr_stop_adapter_bgrb(acb);
461 arcmsr_flush_adapter_cache(acb);
462}
463
464static int arcmsr_module_init(void)
465{
466 int error = 0;
467
468 error = pci_register_driver(&arcmsr_pci_driver);
469 return error;
470}
471
472static void arcmsr_module_exit(void)
473{
474 pci_unregister_driver(&arcmsr_pci_driver);
475}
476module_init(arcmsr_module_init);
477module_exit(arcmsr_module_exit);
478
479static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
480{
481 struct MessageUnit __iomem *reg = acb->pmu;
482 u32 orig_mask = readl(&reg->outbound_intmask);
483
484 writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
485 &reg->outbound_intmask);
486 return orig_mask;
487}
488
489static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
490 u32 orig_mask)
491{
492 struct MessageUnit __iomem *reg = acb->pmu;
493 u32 mask;
494
495 mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
496 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
497 writel(mask, &reg->outbound_intmask);
498}
499
500static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
501{
502 	struct MessageUnit __iomem *reg = acb->pmu;
503
504 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
505 if (arcmsr_wait_msgint_ready(acb))
506 printk(KERN_NOTICE
507 "arcmsr%d: wait 'flush adapter cache' timeout \n"
508 , acb->host->host_no);
509}
510
511static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
512{
513 struct scsi_cmnd *pcmd = ccb->pcmd;
514 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
515
516 pcmd->result = DID_OK << 16;
517 if (sensebuffer) {
518 int sense_data_length =
519 sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
520 ? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
521 memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
522 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
523 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
524 sensebuffer->Valid = 1;
525 }
526}
527
528static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
529{
530 struct MessageUnit __iomem *reg = acb->pmu;
531 uint32_t Index;
532 uint8_t Retries = 0x00;
533
534 do {
535 for (Index = 0; Index < 100; Index++) {
536 if (readl(&reg->outbound_intstatus)
537 & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
538 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
539 , &reg->outbound_intstatus);
540 return 0x00;
541 }
542 msleep_interruptible(10);
543 		}/* max 1 second */
544 } while (Retries++ < 20);/*max 20 sec*/
545 return 0xff;
546}
547
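/*
 * arcmsr_build_ccb -- translate a struct scsi_cmnd into the firmware's
 * ARCMSR_CDB.  Each scatter-gather element becomes a SG32ENTRY when its DMA
 * address fits in 32 bits, or a SG64ENTRY flagged with IS_SG64_ADDR when the
 * upper address bits are set; if the resulting CDB (header plus SG list)
 * exceeds 256 bytes it is marked with ARCMSR_CDB_FLAG_SGL_BSIZE.
 */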
548static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
549 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
550{
551 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
552 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
553 uint32_t address_lo, address_hi;
554 int arccdbsize = 0x30;
555
556 ccb->pcmd = pcmd;
557 memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
558 arcmsr_cdb->Bus = 0;
559 arcmsr_cdb->TargetID = pcmd->device->id;
560 arcmsr_cdb->LUN = pcmd->device->lun;
561 arcmsr_cdb->Function = 1;
562 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
563 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
564 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
565 if (pcmd->use_sg) {
566 int length, sgcount, i, cdb_sgcount = 0;
567 struct scatterlist *sl;
568
569 /* Get Scatter Gather List from scsiport. */
570 sl = (struct scatterlist *) pcmd->request_buffer;
571 sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
572 pcmd->sc_data_direction);
573 /* map stor port SG list to our iop SG List. */
574 for (i = 0; i < sgcount; i++) {
575 /* Get the physical address of the current data pointer */
576 length = cpu_to_le32(sg_dma_len(sl));
577 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
578 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
579 if (address_hi == 0) {
580 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
581
582 pdma_sg->address = address_lo;
583 pdma_sg->length = length;
584 psge += sizeof (struct SG32ENTRY);
585 arccdbsize += sizeof (struct SG32ENTRY);
586 } else {
587 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
588
589 pdma_sg->addresshigh = address_hi;
590 pdma_sg->address = address_lo;
591 pdma_sg->length = length|IS_SG64_ADDR;
592 psge += sizeof (struct SG64ENTRY);
593 arccdbsize += sizeof (struct SG64ENTRY);
594 }
595 sl++;
596 cdb_sgcount++;
597 }
598 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
599 arcmsr_cdb->DataLength = pcmd->request_bufflen;
600 		if (arccdbsize > 256)
601 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
602 } else if (pcmd->request_bufflen) {
603 dma_addr_t dma_addr;
604 dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
605 pcmd->request_bufflen, pcmd->sc_data_direction);
606 pcmd->SCp.dma_handle = dma_addr;
607 address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
608 address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
609 if (address_hi == 0) {
610 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
611 pdma_sg->address = address_lo;
612 pdma_sg->length = pcmd->request_bufflen;
613 } else {
614 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
615 pdma_sg->addresshigh = address_hi;
616 pdma_sg->address = address_lo;
617 pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
618 }
619 arcmsr_cdb->sgcount = 1;
620 arcmsr_cdb->DataLength = pcmd->request_bufflen;
621 }
622 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
623 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
624 ccb->ccb_flags |= CCB_FLAG_WRITE;
625 }
626}
627
628static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
629{
630 struct MessageUnit __iomem *reg = acb->pmu;
631 uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
632 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
633
634 atomic_inc(&acb->ccboutstandingcount);
635 ccb->startdone = ARCMSR_CCB_START;
636 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
637 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
638 &reg->inbound_queueport);
639 else
640 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
641}
642
643void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
644{
645 struct MessageUnit __iomem *reg = acb->pmu;
646 struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
647 uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
648 int32_t allxfer_len = 0;
649
650 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
651 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
652 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
653 && (allxfer_len < 124)) {
654 writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
655 acb->wqbuf_firstindex++;
656 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
657 iop_data++;
658 allxfer_len++;
659 }
660 writel(allxfer_len, &pwbuffer->data_len);
661 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
662 , &reg->inbound_doorbell);
663 }
664}
665
666static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
667{
668 struct MessageUnit __iomem *reg = acb->pmu;
669
670 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
671 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
672 if (arcmsr_wait_msgint_ready(acb))
673 printk(KERN_NOTICE
674 			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
675 , acb->host->host_no);
676}
677
678static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
679{
680 dma_free_coherent(&acb->pdev->dev,
681 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
682 acb->dma_coherent,
683 acb->dma_coherent_handle);
684}
685
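/*
 * arcmsr_interrupt -- read and acknowledge the outbound interrupt status,
 * then service the two sources handled below: doorbell interrupts, which
 * shuttle message data between the driver's rqbuffer/wqbuffer rings and the
 * IOP's message buffers, and post-queue interrupts, which drain completed
 * CCBs from the outbound queue port and finish the owning SCSI commands.
 */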
686static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
687{
688 struct MessageUnit __iomem *reg = acb->pmu;
689 struct CommandControlBlock *ccb;
690 uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;
691
692 outbound_intstatus = readl(&reg->outbound_intstatus)
693 & acb->outbound_int_enable;
694 writel(outbound_intstatus, &reg->outbound_intstatus);
695 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
696 outbound_doorbell = readl(&reg->outbound_doorbell);
697 writel(outbound_doorbell, &reg->outbound_doorbell);
698 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
699 struct QBUFFER __iomem * prbuffer =
700 (struct QBUFFER __iomem *) &reg->message_rbuffer;
701 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
702 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
703
704 rqbuf_lastindex = acb->rqbuf_lastindex;
705 rqbuf_firstindex = acb->rqbuf_firstindex;
706 iop_len = readl(&prbuffer->data_len);
707 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
708 &(ARCMSR_MAX_QBUFFER - 1);
709 if (my_empty_len >= iop_len) {
710 while (iop_len > 0) {
711 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
712 acb->rqbuf_lastindex++;
713 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
714 iop_data++;
715 iop_len--;
716 }
717 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
718 &reg->inbound_doorbell);
719 } else
720 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
721 }
722 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
723 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
724 if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
725 struct QBUFFER __iomem * pwbuffer =
726 (struct QBUFFER __iomem *) &reg->message_wbuffer;
727 uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data;
728 int32_t allxfer_len = 0;
729
730 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
731 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
732 && (allxfer_len < 124)) {
733 writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
734 acb->wqbuf_firstindex++;
735 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
736 iop_data++;
737 allxfer_len++;
738 }
739 writel(allxfer_len, &pwbuffer->data_len);
740 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
741 &reg->inbound_doorbell);
742 }
743 if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
744 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
745 }
746 }
747 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
748 int id, lun;
749 /*
750 ****************************************************************
751 ** areca cdb command done
752 ****************************************************************
753 */
754 while (1) {
755 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF)
756 				break;/* chip FIFO has no more completed ccbs */
757 /* check if command done with no error*/
758 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
759 (flag_ccb << 5));
760 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
761 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
762 struct scsi_cmnd *abortcmd=ccb->pcmd;
763 if (abortcmd) {
764 					abortcmd->result |= DID_ABORT << 16;
765 arcmsr_ccb_complete(ccb, 1);
766 printk(KERN_NOTICE
767 "arcmsr%d: ccb='0x%p' isr got aborted command \n"
768 , acb->host->host_no, ccb);
769 }
770 continue;
771 }
772 printk(KERN_NOTICE
773 				"arcmsr%d: isr got an illegal ccb command done acb='0x%p'"
774 				" ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
775 " ccboutstandingcount=%d \n"
776 , acb->host->host_no
777 , acb
778 , ccb
779 , ccb->acb
780 , ccb->startdone
781 , atomic_read(&acb->ccboutstandingcount));
782 continue;
783 }
784 id = ccb->pcmd->device->id;
785 lun = ccb->pcmd->device->lun;
786 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
787 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
788 acb->devstate[id][lun] = ARECA_RAID_GOOD;
789 ccb->pcmd->result = DID_OK << 16;
790 arcmsr_ccb_complete(ccb, 1);
791 } else {
792 switch(ccb->arcmsr_cdb.DeviceStatus) {
793 case ARCMSR_DEV_SELECT_TIMEOUT: {
794 acb->devstate[id][lun] = ARECA_RAID_GONE;
795 ccb->pcmd->result = DID_TIME_OUT << 16;
796 arcmsr_ccb_complete(ccb, 1);
797 }
798 break;
799 case ARCMSR_DEV_ABORTED:
800 case ARCMSR_DEV_INIT_FAIL: {
801 acb->devstate[id][lun] = ARECA_RAID_GONE;
802 ccb->pcmd->result = DID_BAD_TARGET << 16;
803 arcmsr_ccb_complete(ccb, 1);
804 }
805 break;
806 case ARCMSR_DEV_CHECK_CONDITION: {
807 acb->devstate[id][lun] = ARECA_RAID_GOOD;
808 arcmsr_report_sense_info(ccb);
809 arcmsr_ccb_complete(ccb, 1);
810 }
811 break;
812 default:
813 printk(KERN_NOTICE
814 "arcmsr%d: scsi id=%d lun=%d"
815 				" isr got a command done with error,"
816 				" but got unknown DeviceStatus = 0x%x\n"
817 , acb->host->host_no
818 , id
819 , lun
820 , ccb->arcmsr_cdb.DeviceStatus);
821 acb->devstate[id][lun] = ARECA_RAID_GONE;
822 ccb->pcmd->result = DID_NO_CONNECT << 16;
823 arcmsr_ccb_complete(ccb, 1);
824 break;
825 }
826 }
827 }/*drain reply FIFO*/
828 }
829 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
830 return IRQ_NONE;
831 return IRQ_HANDLED;
832}
833
834static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
835{
836 if (acb) {
837 /* stop adapter background rebuild */
838 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
839 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
840 arcmsr_stop_adapter_bgrb(acb);
841 arcmsr_flush_adapter_cache(acb);
842 }
843 }
844}
845
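/*
 * arcmsr_iop_message_xfer -- service the vendor message interface reached
 * through the virtual device's WRITE_BUFFER/READ_BUFFER commands.  The
 * control code is taken from CDB bytes 5..8 and selects read/write/clear
 * operations on the request and write queue buffers plus a few housekeeping
 * messages (say hello/goodbye, flush adapter cache).
 */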
846static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd)
847{
848 struct MessageUnit __iomem *reg = acb->pmu;
849 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
850 int retvalue = 0, transfer_len = 0;
851 char *buffer;
852 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
853 (uint32_t ) cmd->cmnd[6] << 16 |
854 (uint32_t ) cmd->cmnd[7] << 8 |
855 (uint32_t ) cmd->cmnd[8];
856 /* 4 bytes: Areca io control code */
857 if (cmd->use_sg) {
858 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
859
860 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
861 if (cmd->use_sg > 1) {
862 retvalue = ARCMSR_MESSAGE_FAIL;
863 goto message_out;
864 }
865 transfer_len += sg->length;
866 } else {
867 buffer = cmd->request_buffer;
868 transfer_len = cmd->request_bufflen;
869 }
870 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
871 retvalue = ARCMSR_MESSAGE_FAIL;
872 goto message_out;
873 }
874 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
875 switch(controlcode) {
876 case ARCMSR_MESSAGE_READ_RQBUFFER: {
877 unsigned long *ver_addr;
878 dma_addr_t buf_handle;
879 uint8_t *pQbuffer, *ptmpQbuffer;
880 int32_t allxfer_len = 0;
881
882 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
883 if (!ver_addr) {
884 retvalue = ARCMSR_MESSAGE_FAIL;
885 goto message_out;
886 }
887 ptmpQbuffer = (uint8_t *) ver_addr;
888 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
889 && (allxfer_len < 1031)) {
890 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
891 memcpy(ptmpQbuffer, pQbuffer, 1);
892 acb->rqbuf_firstindex++;
893 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
894 ptmpQbuffer++;
895 allxfer_len++;
896 }
897 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
898 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
899 &reg->message_rbuffer;
900 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
901 int32_t iop_len;
902
903 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
904 iop_len = readl(&prbuffer->data_len);
905 while (iop_len > 0) {
906 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
907 acb->rqbuf_lastindex++;
908 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
909 iop_data++;
910 iop_len--;
911 }
912 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
913 &reg->inbound_doorbell);
914 }
915 memcpy(pcmdmessagefld->messagedatabuffer,
916 (uint8_t *)ver_addr, allxfer_len);
917 pcmdmessagefld->cmdmessage.Length = allxfer_len;
918 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
919 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
920 }
921 break;
922 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
923 unsigned long *ver_addr;
924 dma_addr_t buf_handle;
925 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
926 uint8_t *pQbuffer, *ptmpuserbuffer;
927
928 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
929 if (!ver_addr) {
930 retvalue = ARCMSR_MESSAGE_FAIL;
931 goto message_out;
932 }
933 ptmpuserbuffer = (uint8_t *)ver_addr;
934 user_len = pcmdmessagefld->cmdmessage.Length;
935 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
936 wqbuf_lastindex = acb->wqbuf_lastindex;
937 wqbuf_firstindex = acb->wqbuf_firstindex;
938 if (wqbuf_lastindex != wqbuf_firstindex) {
939 struct SENSE_DATA *sensebuffer =
940 (struct SENSE_DATA *)cmd->sense_buffer;
941 arcmsr_post_Qbuffer(acb);
942 /* has error report sensedata */
943 sensebuffer->ErrorCode = 0x70;
944 sensebuffer->SenseKey = ILLEGAL_REQUEST;
945 sensebuffer->AdditionalSenseLength = 0x0A;
946 sensebuffer->AdditionalSenseCode = 0x20;
947 sensebuffer->Valid = 1;
948 retvalue = ARCMSR_MESSAGE_FAIL;
949 } else {
950 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
951 &(ARCMSR_MAX_QBUFFER - 1);
952 if (my_empty_len >= user_len) {
953 while (user_len > 0) {
954 pQbuffer =
955 &acb->wqbuffer[acb->wqbuf_lastindex];
956 memcpy(pQbuffer, ptmpuserbuffer, 1);
957 acb->wqbuf_lastindex++;
958 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
959 ptmpuserbuffer++;
960 user_len--;
961 }
962 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
963 acb->acb_flags &=
964 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
965 arcmsr_post_Qbuffer(acb);
966 }
967 } else {
968 /* has error report sensedata */
969 struct SENSE_DATA *sensebuffer =
970 (struct SENSE_DATA *)cmd->sense_buffer;
971 sensebuffer->ErrorCode = 0x70;
972 sensebuffer->SenseKey = ILLEGAL_REQUEST;
973 sensebuffer->AdditionalSenseLength = 0x0A;
974 sensebuffer->AdditionalSenseCode = 0x20;
975 sensebuffer->Valid = 1;
976 retvalue = ARCMSR_MESSAGE_FAIL;
977 }
978 }
979 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
980 }
981 break;
982 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
983 uint8_t *pQbuffer = acb->rqbuffer;
984
985 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
986 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
987 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
988 &reg->inbound_doorbell);
989 }
990 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
991 acb->rqbuf_firstindex = 0;
992 acb->rqbuf_lastindex = 0;
993 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
994 pcmdmessagefld->cmdmessage.ReturnCode =
995 ARCMSR_MESSAGE_RETURNCODE_OK;
996 }
997 break;
998 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
999 uint8_t *pQbuffer = acb->wqbuffer;
1000
1001 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1002 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1003 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1004 , &reg->inbound_doorbell);
1005 }
1006 acb->acb_flags |=
1007 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1008 ACB_F_MESSAGE_WQBUFFER_READED);
1009 acb->wqbuf_firstindex = 0;
1010 acb->wqbuf_lastindex = 0;
1011 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1012 pcmdmessagefld->cmdmessage.ReturnCode =
1013 ARCMSR_MESSAGE_RETURNCODE_OK;
1014 }
1015 break;
1016 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1017 uint8_t *pQbuffer;
1018
1019 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1020 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1021 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1022 , &reg->inbound_doorbell);
1023 }
1024 acb->acb_flags |=
1025 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1026 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1027 | ACB_F_MESSAGE_WQBUFFER_READED);
1028 acb->rqbuf_firstindex = 0;
1029 acb->rqbuf_lastindex = 0;
1030 acb->wqbuf_firstindex = 0;
1031 acb->wqbuf_lastindex = 0;
1032 pQbuffer = acb->rqbuffer;
1033 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1034 pQbuffer = acb->wqbuffer;
1035 memset(pQbuffer, 0, sizeof (struct QBUFFER));
1036 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1037 }
1038 break;
1039 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1040 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1041 }
1042 break;
1043 case ARCMSR_MESSAGE_SAY_HELLO: {
1044 int8_t * hello_string = "Hello! I am ARCMSR";
1045
1046 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1047 , (int16_t)strlen(hello_string));
1048 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1049 }
1050 break;
1051 case ARCMSR_MESSAGE_SAY_GOODBYE:
1052 arcmsr_iop_parking(acb);
1053 break;
1054 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1055 arcmsr_flush_adapter_cache(acb);
1056 break;
1057 default:
1058 retvalue = ARCMSR_MESSAGE_FAIL;
1059 }
1060 message_out:
1061 if (cmd->use_sg) {
1062 struct scatterlist *sg;
1063
1064 sg = (struct scatterlist *) cmd->request_buffer;
1065 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1066 }
1067 return retvalue;
1068}
1069
1070static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1071{
1072 struct list_head *head = &acb->ccb_free_list;
1073 struct CommandControlBlock *ccb = NULL;
1074
1075 if (!list_empty(head)) {
1076 ccb = list_entry(head->next, struct CommandControlBlock, list);
1077 list_del(head->next);
1078 }
1079 return ccb;
1080}
1081
1082static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1083 struct scsi_cmnd *cmd)
1084{
1085 switch (cmd->cmnd[0]) {
1086 case INQUIRY: {
1087 unsigned char inqdata[36];
1088 char *buffer;
1089
1090 if (cmd->device->lun) {
1091 cmd->result = (DID_TIME_OUT << 16);
1092 cmd->scsi_done(cmd);
1093 return;
1094 }
1095 inqdata[0] = TYPE_PROCESSOR;
1096 /* Periph Qualifier & Periph Dev Type */
1097 inqdata[1] = 0;
1098 /* rem media bit & Dev Type Modifier */
1099 inqdata[2] = 0;
1100 /* ISO,ECMA,& ANSI versions */
1101 inqdata[4] = 31;
1102 /* length of additional data */
1103 strncpy(&inqdata[8], "Areca ", 8);
1104 /* Vendor Identification */
1105 strncpy(&inqdata[16], "RAID controller ", 16);
1106 /* Product Identification */
1107 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1108 if (cmd->use_sg) {
1109 struct scatterlist *sg;
1110
1111 sg = (struct scatterlist *) cmd->request_buffer;
1112 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1113 } else {
1114 buffer = cmd->request_buffer;
1115 }
1116 memcpy(buffer, inqdata, sizeof(inqdata));
1117 if (cmd->use_sg) {
1118 struct scatterlist *sg;
1119
1120 sg = (struct scatterlist *) cmd->request_buffer;
1121 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1122 }
1123 cmd->scsi_done(cmd);
1124 }
1125 break;
1126 case WRITE_BUFFER:
1127 case READ_BUFFER: {
1128 if (arcmsr_iop_message_xfer(acb, cmd))
1129 cmd->result = (DID_ERROR << 16);
1130 cmd->scsi_done(cmd);
1131 }
1132 break;
1133 default:
1134 cmd->scsi_done(cmd);
1135 }
1136}
1137
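/*
 * arcmsr_queue_command -- queuecommand entry point.  Target id 16 is the
 * adapter's virtual device for message transfer; read/write commands to a
 * volume marked ARECA_RAID_GONE are failed with DID_NO_CONNECT, and
 * everything else is wrapped in a free CCB and posted to the inbound
 * queue port.
 */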
1138static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1139 void (* done)(struct scsi_cmnd *))
1140{
1141 struct Scsi_Host *host = cmd->device->host;
1142 struct AdapterControlBlock *acb =
1143 (struct AdapterControlBlock *) host->hostdata;
1144 struct CommandControlBlock *ccb;
1145 int target = cmd->device->id;
1146 int lun = cmd->device->lun;
1147
1148 cmd->scsi_done = done;
1149 cmd->host_scribble = NULL;
1150 cmd->result = 0;
1151 if (acb->acb_flags & ACB_F_BUS_RESET) {
1152 printk(KERN_NOTICE "arcmsr%d: bus reset"
1153 " and return busy \n"
1154 , acb->host->host_no);
1155 return SCSI_MLQUEUE_HOST_BUSY;
1156 }
1157 if(target == 16) {
1158 /* virtual device for iop message transfer */
1159 arcmsr_handle_virtual_command(acb, cmd);
1160 return 0;
1161 }
1162 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1163 uint8_t block_cmd;
1164
1165 block_cmd = cmd->cmnd[0] & 0x0f;
1166 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1167 printk(KERN_NOTICE
1168 "arcmsr%d: block 'read/write'"
1169 				" command to a gone raid volume"
1170 " Cmd=%2x, TargetId=%d, Lun=%d \n"
1171 , acb->host->host_no
1172 , cmd->cmnd[0]
1173 , target, lun);
1174 cmd->result = (DID_NO_CONNECT << 16);
1175 cmd->scsi_done(cmd);
1176 return 0;
1177 }
1178 }
1179 if (atomic_read(&acb->ccboutstandingcount) >=
1180 ARCMSR_MAX_OUTSTANDING_CMD)
1181 return SCSI_MLQUEUE_HOST_BUSY;
1182
1183 ccb = arcmsr_get_freeccb(acb);
1184 if (!ccb)
1185 return SCSI_MLQUEUE_HOST_BUSY;
1186 arcmsr_build_ccb(acb, ccb, cmd);
1187 arcmsr_post_ccb(acb, ccb);
1188 return 0;
1189}
1190
1191static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1192{
1193 struct MessageUnit __iomem *reg = acb->pmu;
1194 char *acb_firm_model = acb->firm_model;
1195 char *acb_firm_version = acb->firm_version;
1196 char __iomem *iop_firm_model = (char __iomem *) &reg->message_rwbuffer[15];
1197 char __iomem *iop_firm_version = (char __iomem *) &reg->message_rwbuffer[17];
1198 int count;
1199
1200 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1201 if (arcmsr_wait_msgint_ready(acb))
1202 printk(KERN_NOTICE
1203 "arcmsr%d: wait "
1204 "'get adapter firmware miscellaneous data' timeout \n"
1205 , acb->host->host_no);
1206 count = 8;
1207 while (count) {
1208 *acb_firm_model = readb(iop_firm_model);
1209 acb_firm_model++;
1210 iop_firm_model++;
1211 count--;
1212 }
1213 count = 16;
1214 while (count) {
1215 *acb_firm_version = readb(iop_firm_version);
1216 acb_firm_version++;
1217 iop_firm_version++;
1218 count--;
1219 }
1220 printk(KERN_INFO
1221 "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1222 , acb->host->host_no
1223 , acb->firm_version);
1224 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1225 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1226 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1227 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1228}
1229
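/*
 * arcmsr_polling_ccbdone -- used on the abort path with outbound interrupts
 * masked: poll the outbound queue port (up to roughly 100 iterations of
 * 25 ms) until the CCB being waited for, or all outstanding CCBs, have been
 * reaped, completing each one just as the interrupt handler would.
 */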
1230static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1231 struct CommandControlBlock *poll_ccb)
1232{
1233 struct MessageUnit __iomem *reg = acb->pmu;
1234 struct CommandControlBlock *ccb;
1235 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1236 int id, lun;
1237
1238 polling_ccb_retry:
1239 poll_count++;
1240 outbound_intstatus = readl(&reg->outbound_intstatus)
1241 & acb->outbound_int_enable;
1242 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1243 while (1) {
1244 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1245 if (poll_ccb_done)
1246 break;
1247 else {
1248 msleep(25);
1249 if (poll_count > 100)
1250 break;
1251 goto polling_ccb_retry;
1252 }
1253 }
1254 ccb = (struct CommandControlBlock *)
1255 (acb->vir2phy_offset + (flag_ccb << 5));
1256 if ((ccb->acb != acb) ||
1257 (ccb->startdone != ARCMSR_CCB_START)) {
1258 if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
1259 (ccb == poll_ccb)) {
1260 printk(KERN_NOTICE
1261 "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
1262 " poll command abort successfully \n"
1263 , acb->host->host_no
1264 , ccb->pcmd->device->id
1265 , ccb->pcmd->device->lun
1266 , ccb);
1267 ccb->pcmd->result = DID_ABORT << 16;
1268 arcmsr_ccb_complete(ccb, 1);
1269 poll_ccb_done = 1;
1270 continue;
1271 }
1272 printk(KERN_NOTICE
1273 			"arcmsr%d: polling got an illegal ccb"
1274 			" command done ccb='0x%p'"
1275 			" ccboutstandingcount=%d\n"
1276 , acb->host->host_no
1277 , ccb
1278 , atomic_read(&acb->ccboutstandingcount));
1279 continue;
1280 }
1281 id = ccb->pcmd->device->id;
1282 lun = ccb->pcmd->device->lun;
1283 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
1284 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1285 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1286 ccb->pcmd->result = DID_OK << 16;
1287 arcmsr_ccb_complete(ccb, 1);
1288 } else {
1289 switch(ccb->arcmsr_cdb.DeviceStatus) {
1290 case ARCMSR_DEV_SELECT_TIMEOUT: {
1291 acb->devstate[id][lun] = ARECA_RAID_GONE;
1292 ccb->pcmd->result = DID_TIME_OUT << 16;
1293 arcmsr_ccb_complete(ccb, 1);
1294 }
1295 break;
1296 case ARCMSR_DEV_ABORTED:
1297 case ARCMSR_DEV_INIT_FAIL: {
1298 acb->devstate[id][lun] = ARECA_RAID_GONE;
1299 ccb->pcmd->result = DID_BAD_TARGET << 16;
1300 arcmsr_ccb_complete(ccb, 1);
1301 }
1302 break;
1303 case ARCMSR_DEV_CHECK_CONDITION: {
1304 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1305 arcmsr_report_sense_info(ccb);
1306 arcmsr_ccb_complete(ccb, 1);
1307 }
1308 break;
1309 default:
1310 printk(KERN_NOTICE
1311 "arcmsr%d: scsi id=%d lun=%d"
1312 				" polling got a command done with error,"
1313 				" but got unknown DeviceStatus = 0x%x\n"
1314 , acb->host->host_no
1315 , id
1316 , lun
1317 , ccb->arcmsr_cdb.DeviceStatus);
1318 acb->devstate[id][lun] = ARECA_RAID_GONE;
1319 ccb->pcmd->result = DID_BAD_TARGET << 16;
1320 arcmsr_ccb_complete(ccb, 1);
1321 break;
1322 }
1323 }
1324 }
1325}
1326
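/*
 * arcmsr_iop_init -- wait for the firmware-ready bit in outbound_msgaddr1,
 * fetch the firmware model, version and queue geometry, start the
 * background rebuild, clear any stale doorbell state and finally unmask
 * the post-queue and doorbell interrupts.
 */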
1327static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1328{
1329 struct MessageUnit __iomem *reg = acb->pmu;
1330 uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0;
1331
1332 do {
1333 firmware_state = readl(&reg->outbound_msgaddr1);
1334 } while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK));
1335 intmask_org = readl(&reg->outbound_intmask)
1336 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
1337 arcmsr_get_firmware_spec(acb);
1338
1339 acb->acb_flags |= ACB_F_MSG_START_BGRB;
1340 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
1341 if (arcmsr_wait_msgint_ready(acb)) {
1342 printk(KERN_NOTICE "arcmsr%d: "
1343 			"wait 'start adapter background rebuild' timeout\n",
1344 acb->host->host_no);
1345 }
1346
1347 outbound_doorbell = readl(&reg->outbound_doorbell);
1348 writel(outbound_doorbell, &reg->outbound_doorbell);
1349 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1350 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
1351 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
1352 writel(intmask_org & mask, &reg->outbound_intmask);
1353 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1354 acb->acb_flags |= ACB_F_IOP_INITED;
1355}
1356
1357static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1358{
1359 struct MessageUnit __iomem *reg = acb->pmu;
1360 struct CommandControlBlock *ccb;
1361 uint32_t intmask_org;
1362 int i = 0;
1363
1364 if (atomic_read(&acb->ccboutstandingcount) != 0) {
1365 		/* tell iop 331 to abort all outstanding commands */
1366 arcmsr_abort_allcmd(acb);
1367 		/* wait 3 seconds for all commands to be aborted */
1368 msleep_interruptible(3000);
1369 /* disable all outbound interrupt */
1370 intmask_org = arcmsr_disable_outbound_ints(acb);
1371 /* clear all outbound posted Q */
1372 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
1373 readl(&reg->outbound_queueport);
1374 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1375 ccb = acb->pccb_pool[i];
1376 if ((ccb->startdone == ARCMSR_CCB_START) ||
1377 (ccb->startdone == ARCMSR_CCB_ABORTED)) {
1378 ccb->startdone = ARCMSR_CCB_ABORTED;
1379 ccb->pcmd->result = DID_ABORT << 16;
1380 arcmsr_ccb_complete(ccb, 1);
1381 }
1382 }
1383 /* enable all outbound interrupt */
1384 arcmsr_enable_outbound_ints(acb, intmask_org);
1385 }
1386 atomic_set(&acb->ccboutstandingcount, 0);
1387}
1388
1389static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
1390{
1391 struct AdapterControlBlock *acb =
1392 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1393 int i;
1394
1395 acb->num_resets++;
1396 acb->acb_flags |= ACB_F_BUS_RESET;
1397 for (i = 0; i < 400; i++) {
1398 if (!atomic_read(&acb->ccboutstandingcount))
1399 break;
1400 arcmsr_interrupt(acb);
1401 msleep(25);
1402 }
1403 arcmsr_iop_reset(acb);
1404 acb->acb_flags &= ~ACB_F_BUS_RESET;
1405 return SUCCESS;
1406}
1407
1408static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
1409 struct CommandControlBlock *ccb)
1410{
1411 u32 intmask;
1412
1413 ccb->startdone = ARCMSR_CCB_ABORTED;
1414
1415 /*
1416 ** Wait for 3 sec for all command done.
1417 */
1418 msleep_interruptible(3000);
1419
1420 intmask = arcmsr_disable_outbound_ints(acb);
1421 arcmsr_polling_ccbdone(acb, ccb);
1422 arcmsr_enable_outbound_ints(acb, intmask);
1423}
1424
1425static int arcmsr_abort(struct scsi_cmnd *cmd)
1426{
1427 struct AdapterControlBlock *acb =
1428 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1429 int i = 0;
1430
1431 printk(KERN_NOTICE
1432 "arcmsr%d: abort device command of scsi id=%d lun=%d \n",
1433 acb->host->host_no, cmd->device->id, cmd->device->lun);
1434 acb->num_aborts++;
1435
1436 /*
1437 ************************************************
1438 ** the all interrupt service routine is locked
1439 ** we need to handle it as soon as possible and exit
1440 ************************************************
1441 */
1442 if (!atomic_read(&acb->ccboutstandingcount))
1443 return SUCCESS;
1444
1445 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1446 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1447 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
1448 arcmsr_abort_one_cmd(acb, ccb);
1449 break;
1450 }
1451 }
1452
1453 return SUCCESS;
1454}
1455
1456static const char *arcmsr_info(struct Scsi_Host *host)
1457{
1458 struct AdapterControlBlock *acb =
1459 (struct AdapterControlBlock *) host->hostdata;
1460 static char buf[256];
1461 char *type;
1462 int raid6 = 1;
1463
1464 switch (acb->pdev->device) {
1465 case PCI_DEVICE_ID_ARECA_1110:
1466 case PCI_DEVICE_ID_ARECA_1210:
1467 raid6 = 0;
1468 /*FALLTHRU*/
1469 case PCI_DEVICE_ID_ARECA_1120:
1470 case PCI_DEVICE_ID_ARECA_1130:
1471 case PCI_DEVICE_ID_ARECA_1160:
1472 case PCI_DEVICE_ID_ARECA_1170:
1473 case PCI_DEVICE_ID_ARECA_1220:
1474 case PCI_DEVICE_ID_ARECA_1230:
1475 case PCI_DEVICE_ID_ARECA_1260:
1476 case PCI_DEVICE_ID_ARECA_1270:
1477 case PCI_DEVICE_ID_ARECA_1280:
1478 type = "SATA";
1479 break;
1480 case PCI_DEVICE_ID_ARECA_1380:
1481 case PCI_DEVICE_ID_ARECA_1381:
1482 case PCI_DEVICE_ID_ARECA_1680:
1483 case PCI_DEVICE_ID_ARECA_1681:
1484 type = "SAS";
1485 break;
1486 default:
1487 type = "X-TYPE";
1488 break;
1489 }
1490 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
1491 		type, raid6 ? " (RAID6 capable)" : "",
1492 ARCMSR_DRIVER_VERSION);
1493 return buf;
1494}
1495
1496
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index e1337339cacc..7b3bd34faf47 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -46,7 +46,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
46 46
47#include <linux/stat.h> 47#include <linux/stat.h>
48#include <linux/slab.h> /* for kmalloc() */ 48#include <linux/slab.h> /* for kmalloc() */
49#include <linux/config.h> /* for CONFIG_PCI */
50#include <linux/pci.h> /* for PCI support */ 49#include <linux/pci.h> /* for PCI support */
51#include <linux/proc_fs.h> 50#include <linux/proc_fs.h>
52#include <linux/blkdev.h> 51#include <linux/blkdev.h>
@@ -185,7 +184,7 @@ static int adpt_detect(struct scsi_host_template* sht)
185 PINFO("Detecting Adaptec I2O RAID controllers...\n"); 184 PINFO("Detecting Adaptec I2O RAID controllers...\n");
186 185
187 /* search for all Adatpec I2O RAID cards */ 186 /* search for all Adatpec I2O RAID cards */
188 while ((pDev = pci_find_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) { 187 while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
189 if(pDev->device == PCI_DPT_DEVICE_ID || 188 if(pDev->device == PCI_DPT_DEVICE_ID ||
190 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){ 189 pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
191 if(adpt_install_hba(sht, pDev) ){ 190 if(adpt_install_hba(sht, pDev) ){
@@ -193,8 +192,11 @@ static int adpt_detect(struct scsi_host_template* sht)
193 PERROR("Will not try to detect others.\n"); 192 PERROR("Will not try to detect others.\n");
194 return hba_count-1; 193 return hba_count-1;
195 } 194 }
195 pci_dev_get(pDev);
196 } 196 }
197 } 197 }
198 if (pDev)
199 pci_dev_put(pDev);
198 200
199 /* In INIT state, Activate IOPs */ 201 /* In INIT state, Activate IOPs */
200 for (pHba = hba_chain; pHba; pHba = pHba->next) { 202 for (pHba = hba_chain; pHba; pHba = pHba->next) {
@@ -1076,6 +1078,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
1076 } 1078 }
1077 } 1079 }
1078 } 1080 }
1081 pci_dev_put(pHba->pDev);
1079 kfree(pHba); 1082 kfree(pHba);
1080 1083
1081 if(hba_count <= 0){ 1084 if(hba_count <= 0){
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
index 34bce2c9e92e..635c14861f86 100644
--- a/drivers/scsi/eata_generic.h
+++ b/drivers/scsi/eata_generic.h
@@ -364,6 +364,7 @@ typedef struct hstd {
364 __u8 moresupport; /* HBA supports MORE flag */ 364 __u8 moresupport; /* HBA supports MORE flag */
365 struct Scsi_Host *next; 365 struct Scsi_Host *next;
366 struct Scsi_Host *prev; 366 struct Scsi_Host *prev;
367 struct pci_dev *pdev; /* PCI device or NULL for non PCI */
367 struct eata_sp sp; /* status packet */ 368 struct eata_sp sp; /* status packet */
368 struct eata_ccb ccb[0]; /* ccb array begins here */ 369 struct eata_ccb ccb[0]; /* ccb array begins here */
369}hostdata; 370}hostdata;
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 771b01984cbc..d312633db92b 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -71,11 +71,11 @@
71#include "eata_pio.h" 71#include "eata_pio.h"
72 72
73 73
74static uint ISAbases[MAXISA] = { 74static unsigned int ISAbases[MAXISA] = {
75 0x1F0, 0x170, 0x330, 0x230 75 0x1F0, 0x170, 0x330, 0x230
76}; 76};
77 77
78static uint ISAirqs[MAXISA] = { 78static unsigned int ISAirqs[MAXISA] = {
79 14, 12, 15, 11 79 14, 12, 15, 11
80}; 80};
81 81
@@ -84,7 +84,7 @@ static unsigned char EISAbases[] = {
84 1, 1, 1, 1, 1, 1, 1, 1 84 1, 1, 1, 1, 1, 1, 1, 1
85}; 85};
86 86
87static uint registered_HBAs; 87static unsigned int registered_HBAs;
88static struct Scsi_Host *last_HBA; 88static struct Scsi_Host *last_HBA;
89static struct Scsi_Host *first_HBA; 89static struct Scsi_Host *first_HBA;
90static unsigned char reg_IRQ[16]; 90static unsigned char reg_IRQ[16];
@@ -165,6 +165,7 @@ static int eata_pio_proc_info(struct Scsi_Host *shost, char *buffer, char **star
165 165
166static int eata_pio_release(struct Scsi_Host *sh) 166static int eata_pio_release(struct Scsi_Host *sh)
167{ 167{
168 hostdata *hd = SD(sh);
168 if (sh->irq && reg_IRQ[sh->irq] == 1) 169 if (sh->irq && reg_IRQ[sh->irq] == 1)
169 free_irq(sh->irq, NULL); 170 free_irq(sh->irq, NULL);
170 else 171 else
@@ -173,10 +174,13 @@ static int eata_pio_release(struct Scsi_Host *sh)
173 if (sh->io_port && sh->n_io_port) 174 if (sh->io_port && sh->n_io_port)
174 release_region(sh->io_port, sh->n_io_port); 175 release_region(sh->io_port, sh->n_io_port);
175 } 176 }
177 /* At this point the PCI reference can go */
178 if (hd->pdev)
179 pci_dev_put(hd->pdev);
176 return 1; 180 return 1;
177} 181}
178 182
179static void IncStat(struct scsi_pointer *SCp, uint Increment) 183static void IncStat(struct scsi_pointer *SCp, unsigned int Increment)
180{ 184{
181 SCp->ptr += Increment; 185 SCp->ptr += Increment;
182 if ((SCp->this_residual -= Increment) == 0) { 186 if ((SCp->this_residual -= Increment) == 0) {
@@ -190,46 +194,49 @@ static void IncStat(struct scsi_pointer *SCp, uint Increment)
190 } 194 }
191} 195}
192 196
193static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs); 197static irqreturn_t eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs);
194 198
195static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id, 199static irqreturn_t do_eata_pio_int_handler(int irq, void *dev_id,
196 struct pt_regs *regs) 200 struct pt_regs *regs)
197{ 201{
198 unsigned long flags; 202 unsigned long flags;
199 struct Scsi_Host *dev = dev_id; 203 struct Scsi_Host *dev = dev_id;
204 irqreturn_t ret;
200 205
201 spin_lock_irqsave(dev->host_lock, flags); 206 spin_lock_irqsave(dev->host_lock, flags);
202 eata_pio_int_handler(irq, dev_id, regs); 207 ret = eata_pio_int_handler(irq, dev_id, regs);
203 spin_unlock_irqrestore(dev->host_lock, flags); 208 spin_unlock_irqrestore(dev->host_lock, flags);
204 return IRQ_HANDLED; 209 return ret;
205} 210}
206 211
207static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs) 212static irqreturn_t eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
208{ 213{
209 uint eata_stat = 0xfffff; 214 unsigned int eata_stat = 0xfffff;
210 struct scsi_cmnd *cmd; 215 struct scsi_cmnd *cmd;
211 hostdata *hd; 216 hostdata *hd;
212 struct eata_ccb *cp; 217 struct eata_ccb *cp;
213 uint base; 218 unsigned long base;
214 uint x, z; 219 unsigned int x, z;
215 struct Scsi_Host *sh; 220 struct Scsi_Host *sh;
216 unsigned short zwickel = 0; 221 unsigned short zwickel = 0;
217 unsigned char stat, odd; 222 unsigned char stat, odd;
223 irqreturn_t ret = IRQ_NONE;
218 224
219 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev) 225 for (x = 1, sh = first_HBA; x <= registered_HBAs; x++, sh = SD(sh)->prev)
220 { 226 {
221 if (sh->irq != irq) 227 if (sh->irq != irq)
222 continue; 228 continue;
223 if (inb((uint) sh->base + HA_RSTATUS) & HA_SBUSY) 229 if (inb(sh->base + HA_RSTATUS) & HA_SBUSY)
224 continue; 230 continue;
225 231
226 int_counter++; 232 int_counter++;
233 ret = IRQ_HANDLED;
227 234
228 hd = SD(sh); 235 hd = SD(sh);
229 236
230 cp = &hd->ccb[0]; 237 cp = &hd->ccb[0];
231 cmd = cp->cmd; 238 cmd = cp->cmd;
232 base = (uint) cmd->device->host->base; 239 base = cmd->device->host->base;
233 240
234 do { 241 do {
235 stat = inb(base + HA_RSTATUS); 242 stat = inb(base + HA_RSTATUS);
@@ -304,7 +311,7 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
304 if (!(inb(base + HA_RSTATUS) & HA_SERROR)) { 311 if (!(inb(base + HA_RSTATUS) & HA_SERROR)) {
305 cmd->result = (DID_OK << 16); 312 cmd->result = (DID_OK << 16);
306 hd->devflags |= (1 << cp->cp_id); 313 hd->devflags |= (1 << cp->cp_id);
307 } else if (hd->devflags & 1 << cp->cp_id) 314 } else if (hd->devflags & (1 << cp->cp_id))
308 cmd->result = (DID_OK << 16) + 0x02; 315 cmd->result = (DID_OK << 16) + 0x02;
309 else 316 else
310 cmd->result = (DID_NO_CONNECT << 16); 317 cmd->result = (DID_NO_CONNECT << 16);
@@ -313,7 +320,7 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
313 cp->status = FREE; 320 cp->status = FREE;
314 eata_stat = inb(base + HA_RSTATUS); 321 eata_stat = inb(base + HA_RSTATUS);
315 printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n"); 322 printk(KERN_CRIT "eata_pio: int_handler, freeing locked " "queueslot\n");
316 return; 323 return ret;
317 } 324 }
318#if DBG_INTR2 325#if DBG_INTR2
319 if (stat != 0x50) 326 if (stat != 0x50)
@@ -325,12 +332,12 @@ static void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs *regs)
325 cmd->scsi_done(cmd); 332 cmd->scsi_done(cmd);
326 } 333 }
327 334
328 return; 335 return ret;
329} 336}
330 337
331static inline uint eata_pio_send_command(uint base, unsigned char command) 338static inline unsigned int eata_pio_send_command(unsigned long base, unsigned char command)
332{ 339{
333 uint loop = HZ / 2; 340 unsigned int loop = 50;
334 341
335 while (inb(base + HA_RSTATUS) & HA_SBUSY) 342 while (inb(base + HA_RSTATUS) & HA_SBUSY)
336 if (--loop == 0) 343 if (--loop == 0)
@@ -349,8 +356,8 @@ static inline uint eata_pio_send_command(uint base, unsigned char command)
349static int eata_pio_queue(struct scsi_cmnd *cmd, 356static int eata_pio_queue(struct scsi_cmnd *cmd,
350 void (*done)(struct scsi_cmnd *)) 357 void (*done)(struct scsi_cmnd *))
351{ 358{
352 uint x, y; 359 unsigned int x, y;
353 uint base; 360 unsigned long base;
354 361
355 hostdata *hd; 362 hostdata *hd;
356 struct Scsi_Host *sh; 363 struct Scsi_Host *sh;
@@ -360,7 +367,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
360 367
361 hd = HD(cmd); 368 hd = HD(cmd);
362 sh = cmd->device->host; 369 sh = cmd->device->host;
363 base = (uint) sh->base; 370 base = sh->base;
364 371
365 /* use only slot 0, as 2001 can handle only one cmd at a time */ 372 /* use only slot 0, as 2001 can handle only one cmd at a time */
366 373
@@ -395,9 +402,9 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
395 cp->DataIn = 0; /* Input mode */ 402 cp->DataIn = 0; /* Input mode */
396 403
397 cp->Interpret = (cmd->device->id == hd->hostid); 404 cp->Interpret = (cmd->device->id == hd->hostid);
398 cp->cp_datalen = htonl((unsigned long) cmd->request_bufflen); 405 cp->cp_datalen = cpu_to_be32(cmd->request_bufflen);
399 cp->Auto_Req_Sen = 0; 406 cp->Auto_Req_Sen = 0;
400 cp->cp_reqDMA = htonl(0); 407 cp->cp_reqDMA = 0;
401 cp->reqlen = 0; 408 cp->reqlen = 0;
402 409
403 cp->cp_id = cmd->device->id; 410 cp->cp_id = cmd->device->id;
@@ -406,7 +413,7 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
406 cp->cp_identify = 1; 413 cp->cp_identify = 1;
407 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd)); 414 memcpy(cp->cp_cdb, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
408 415
409 cp->cp_statDMA = htonl(0); 416 cp->cp_statDMA = 0;
410 417
411 cp->cp_viraddr = cp; 418 cp->cp_viraddr = cp;
412 cp->cmd = cmd; 419 cp->cmd = cmd;
@@ -445,14 +452,14 @@ static int eata_pio_queue(struct scsi_cmnd *cmd,
445 452
446 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd, 453 DBG(DBG_QUEUE, scmd_printk(KERN_DEBUG, cmd,
447 "Queued base %#.4lx pid: %ld " 454 "Queued base %#.4lx pid: %ld "
448 "slot %d irq %d\n", (long) sh->base, cmd->pid, y, sh->irq)); 455 "slot %d irq %d\n", sh->base, cmd->pid, y, sh->irq));
449 456
450 return (0); 457 return (0);
451} 458}
452 459
453static int eata_pio_abort(struct scsi_cmnd *cmd) 460static int eata_pio_abort(struct scsi_cmnd *cmd)
454{ 461{
455 uint loop = HZ; 462 unsigned int loop = 100;
456 463
457 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd, 464 DBG(DBG_ABNORM, scmd_printk(KERN_WARNING, cmd,
458 "eata_pio_abort called pid: %ld\n", 465 "eata_pio_abort called pid: %ld\n",
@@ -485,7 +492,7 @@ static int eata_pio_abort(struct scsi_cmnd *cmd)
485 492
486static int eata_pio_host_reset(struct scsi_cmnd *cmd) 493static int eata_pio_host_reset(struct scsi_cmnd *cmd)
487{ 494{
488 uint x, limit = 0; 495 unsigned int x, limit = 0;
489 unsigned char success = 0; 496 unsigned char success = 0;
490 struct scsi_cmnd *sp; 497 struct scsi_cmnd *sp;
491 struct Scsi_Host *host = cmd->device->host; 498 struct Scsi_Host *host = cmd->device->host;
@@ -518,7 +525,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
518 } 525 }
519 526
520 /* hard reset the HBA */ 527 /* hard reset the HBA */
521 outb(EATA_CMD_RESET, (uint) cmd->device->host->base + HA_WCOMMAND); 528 outb(EATA_CMD_RESET, cmd->device->host->base + HA_WCOMMAND);
522 529
523 DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n")); 530 DBG(DBG_ABNORM, printk(KERN_WARNING "eata_pio_reset: board reset done.\n"));
524 HD(cmd)->state = RESET; 531 HD(cmd)->state = RESET;
@@ -558,7 +565,7 @@ static int eata_pio_host_reset(struct scsi_cmnd *cmd)
558 } 565 }
559} 566}
560 567
561static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned long cplen, unsigned short cppadlen) 568static char *get_pio_board_data(unsigned long base, unsigned int irq, unsigned int id, unsigned long cplen, unsigned short cppadlen)
562{ 569{
563 struct eata_ccb cp; 570 struct eata_ccb cp;
564 static char buff[256]; 571 static char buff[256];
@@ -570,8 +577,8 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
570 cp.DataIn = 1; 577 cp.DataIn = 1;
571 cp.Interpret = 1; /* Interpret command */ 578 cp.Interpret = 1; /* Interpret command */
572 579
573 cp.cp_datalen = htonl(254); 580 cp.cp_datalen = cpu_to_be32(254);
574 cp.cp_dataDMA = htonl(0); 581 cp.cp_dataDMA = cpu_to_be32(0);
575 582
576 cp.cp_id = id; 583 cp.cp_id = id;
577 cp.cp_lun = 0; 584 cp.cp_lun = 0;
@@ -583,7 +590,7 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
583 cp.cp_cdb[4] = 254; 590 cp.cp_cdb[4] = 254;
584 cp.cp_cdb[5] = 0; 591 cp.cp_cdb[5] = 0;
585 592
586 if (eata_pio_send_command((uint) base, EATA_CMD_PIO_SEND_CP)) 593 if (eata_pio_send_command(base, EATA_CMD_PIO_SEND_CP))
587 return (NULL); 594 return (NULL);
588 while (!(inb(base + HA_RSTATUS) & HA_SDRQ)); 595 while (!(inb(base + HA_RSTATUS) & HA_SDRQ));
589 outsw(base + HA_RDATA, &cp, cplen); 596 outsw(base + HA_RDATA, &cp, cplen);
@@ -604,7 +611,7 @@ static char *get_pio_board_data(unsigned long base, uint irq, uint id, unsigned
604 } 611 }
605} 612}
606 613
607static int get_pio_conf_PIO(u32 base, struct get_conf *buf) 614static int get_pio_conf_PIO(unsigned long base, struct get_conf *buf)
608{ 615{
609 unsigned long loop = HZ / 2; 616 unsigned long loop = HZ / 2;
610 int z; 617 int z;
@@ -619,30 +626,30 @@ static int get_pio_conf_PIO(u32 base, struct get_conf *buf)
619 if (--loop == 0) 626 if (--loop == 0)
620 goto fail; 627 goto fail;
621 628
622 DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#x\n", base)); 629 DBG(DBG_PIO && DBG_PROBE, printk(KERN_DEBUG "Issuing PIO READ CONFIG to HBA at %#lx\n", base));
623 eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG); 630 eata_pio_send_command(base, EATA_CMD_PIO_READ_CONFIG);
624 631
625 loop = HZ / 2; 632 loop = 50;
626 for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) { 633 for (p = (unsigned short *) buf; (long) p <= ((long) buf + (sizeof(struct get_conf) / 2)); p++) {
627 while (!(inb(base + HA_RSTATUS) & HA_SDRQ)) 634 while (!(inb(base + HA_RSTATUS) & HA_SDRQ))
628 if (--loop == 0) 635 if (--loop == 0)
629 goto fail; 636 goto fail;
630 637
631 loop = HZ / 2; 638 loop = 50;
632 *p = inw(base + HA_RDATA); 639 *p = inw(base + HA_RDATA);
633 } 640 }
634 if (inb(base + HA_RSTATUS) & HA_SERROR) { 641 if (inb(base + HA_RSTATUS) & HA_SERROR) {
635 DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during " 642 DBG(DBG_PROBE, printk("eata_dma: get_conf_PIO, error during "
636 "transfer for HBA at %x\n", base)); 643 "transfer for HBA at %lx\n", base));
637 goto fail; 644 goto fail;
638 } 645 }
639 646
640 if (htonl(EATA_SIGNATURE) != buf->signature) 647 if (cpu_to_be32(EATA_SIGNATURE) != buf->signature)
641 goto fail; 648 goto fail;
642 649
643 DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found " 650 DBG(DBG_PIO && DBG_PROBE, printk(KERN_NOTICE "EATA Controller found "
644 "at %#4x EATA Level: %x\n", 651 "at %#4lx EATA Level: %x\n",
645 base, (uint) (buf->version))); 652 base, (unsigned int) (buf->version)));
646 653
647 while (inb(base + HA_RSTATUS) & HA_SDRQ) 654 while (inb(base + HA_RSTATUS) & HA_SDRQ)
648 inw(base + HA_RDATA); 655 inw(base + HA_RDATA);
@@ -665,12 +672,12 @@ static int get_pio_conf_PIO(u32 base, struct get_conf *buf)
665static void print_pio_config(struct get_conf *gc) 672static void print_pio_config(struct get_conf *gc)
666{ 673{
667 printk("Please check values: (read config data)\n"); 674 printk("Please check values: (read config data)\n");
668 printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", (uint) ntohl(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support); 675 printk("LEN: %d ver:%d OCS:%d TAR:%d TRNXFR:%d MORES:%d\n", be32_to_cpu(gc->len), gc->version, gc->OCS_enabled, gc->TAR_support, gc->TRNXFR, gc->MORE_support);
669 printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], ntohs(gc->queuesiz), ntohs(gc->SGsiz), gc->SECOND); 676 printk("HAAV:%d SCSIID0:%d ID1:%d ID2:%d QUEUE:%d SG:%d SEC:%d\n", gc->HAA_valid, gc->scsi_id[3], gc->scsi_id[2], gc->scsi_id[1], be16_to_cpu(gc->queuesiz), be16_to_cpu(gc->SGsiz), gc->SECOND);
670 printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest); 677 printk("IRQ:%d IRQT:%d FORCADR:%d MCH:%d RIDQ:%d\n", gc->IRQ, gc->IRQ_TR, gc->FORCADR, gc->MAX_CHAN, gc->ID_qest);
671} 678}
672 679
673static uint print_selftest(uint base) 680static unsigned int print_selftest(unsigned int base)
674{ 681{
675 unsigned char buffer[512]; 682 unsigned char buffer[512];
676#ifdef VERBOSE_SETUP 683#ifdef VERBOSE_SETUP
@@ -697,7 +704,7 @@ static uint print_selftest(uint base)
697 return (!(inb(base + HA_RSTATUS) & HA_SERROR)); 704 return (!(inb(base + HA_RSTATUS) & HA_SERROR));
698} 705}
699 706
700static int register_pio_HBA(long base, struct get_conf *gc) 707static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev)
701{ 708{
702 unsigned long size = 0; 709 unsigned long size = 0;
703 char *buff; 710 char *buff;
@@ -714,17 +721,17 @@ static int register_pio_HBA(long base, struct get_conf *gc)
714 return 0; 721 return 0;
715 } 722 }
716 723
717 if ((buff = get_pio_board_data((uint) base, gc->IRQ, gc->scsi_id[3], cplen = (htonl(gc->cplen) + 1) / 2, cppadlen = (htons(gc->cppadlen) + 1) / 2)) == NULL) { 724 if ((buff = get_pio_board_data(base, gc->IRQ, gc->scsi_id[3], cplen = (cpu_to_be32(gc->cplen) + 1) / 2, cppadlen = (cpu_to_be16(gc->cppadlen) + 1) / 2)) == NULL) {
718 printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", (unsigned long) base); 725 printk("HBA at %#lx didn't react on INQUIRY. Sorry.\n", base);
719 return 0; 726 return 0;
720 } 727 }
721 728
722 if (!print_selftest(base) && !ALLOW_DMA_BOARDS) { 729 if (!print_selftest(base) && !ALLOW_DMA_BOARDS) {
723 printk("HBA at %#lx failed while performing self test & setup.\n", (unsigned long) base); 730 printk("HBA at %#lx failed while performing self test & setup.\n", base);
724 return 0; 731 return 0;
725 } 732 }
726 733
727 size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)); 734 size = sizeof(hostdata) + (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz));
728 735
729 sh = scsi_register(&driver_template, size); 736 sh = scsi_register(&driver_template, size);
730 if (sh == NULL) 737 if (sh == NULL)
@@ -749,8 +756,8 @@ static int register_pio_HBA(long base, struct get_conf *gc)
749 756
750 hd = SD(sh); 757 hd = SD(sh);
751 758
752 memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz))); 759 memset(hd->ccb, 0, (sizeof(struct eata_ccb) * be16_to_cpu(gc->queuesiz)));
753 memset(hd->reads, 0, sizeof(unsigned long) * 26); 760 memset(hd->reads, 0, sizeof(hd->reads));
754 761
755 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor)); 762 strlcpy(SD(sh)->vendor, &buff[8], sizeof(SD(sh)->vendor));
756 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name)); 763 strlcpy(SD(sh)->name, &buff[16], sizeof(SD(sh)->name));
@@ -761,7 +768,7 @@ static int register_pio_HBA(long base, struct get_conf *gc)
761 SD(sh)->revision[4] = buff[35]; 768 SD(sh)->revision[4] = buff[35];
762 SD(sh)->revision[5] = 0; 769 SD(sh)->revision[5] = 0;
763 770
764 switch (ntohl(gc->len)) { 771 switch (be32_to_cpu(gc->len)) {
765 case 0x1c: 772 case 0x1c:
766 SD(sh)->EATA_revision = 'a'; 773 SD(sh)->EATA_revision = 'a';
767 break; 774 break;
@@ -777,7 +784,7 @@ static int register_pio_HBA(long base, struct get_conf *gc)
777 SD(sh)->EATA_revision = '?'; 784 SD(sh)->EATA_revision = '?';
778 } 785 }
779 786
780 if (ntohl(gc->len) >= 0x22) { 787 if (be32_to_cpu(gc->len) >= 0x22) {
781 if (gc->is_PCI) 788 if (gc->is_PCI)
782 hd->bustype = IS_PCI; 789 hd->bustype = IS_PCI;
783 else if (gc->is_EISA) 790 else if (gc->is_EISA)
@@ -811,6 +818,8 @@ static int register_pio_HBA(long base, struct get_conf *gc)
811 818
812 hd->channel = 0; 819 hd->channel = 0;
813 820
821 hd->pdev = pci_dev_get(pdev); /* Keep a PCI reference */
822
814 sh->max_id = 8; 823 sh->max_id = 8;
815 sh->max_lun = 8; 824 sh->max_lun = 8;
816 825
@@ -841,7 +850,7 @@ static void find_pio_ISA(struct get_conf *buf)
841 continue; 850 continue;
842 if (!get_pio_conf_PIO(ISAbases[i], buf)) 851 if (!get_pio_conf_PIO(ISAbases[i], buf))
843 continue; 852 continue;
844 if (!register_pio_HBA(ISAbases[i], buf)) 853 if (!register_pio_HBA(ISAbases[i], buf, NULL))
845 release_region(ISAbases[i], 9); 854 release_region(ISAbases[i], 9);
846 else 855 else
847 ISAbases[i] = 0; 856 ISAbases[i] = 0;
@@ -873,7 +882,7 @@ static void find_pio_EISA(struct get_conf *buf)
873 if (get_pio_conf_PIO(base, buf)) { 882 if (get_pio_conf_PIO(base, buf)) {
874 DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf)); 883 DBG(DBG_PROBE && DBG_EISA, print_pio_config(buf));
875 if (buf->IRQ) { 884 if (buf->IRQ) {
876 if (!register_pio_HBA(base, buf)) 885 if (!register_pio_HBA(base, buf, NULL))
877 release_region(base, 9); 886 release_region(base, 9);
878 } else { 887 } else {
879 printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n"); 888 printk(KERN_NOTICE "eata_dma: No valid IRQ. HBA " "removed from list\n");
@@ -896,9 +905,9 @@ static void find_pio_PCI(struct get_conf *buf)
896 printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n"); 905 printk("eata_dma: kernel PCI support not enabled. Skipping scan for PCI HBAs.\n");
897#else 906#else
898 struct pci_dev *dev = NULL; 907 struct pci_dev *dev = NULL;
899 u32 base, x; 908 unsigned long base, x;
900 909
901 while ((dev = pci_find_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) { 910 while ((dev = pci_get_device(PCI_VENDOR_ID_DPT, PCI_DEVICE_ID_DPT, dev)) != NULL) {
902 DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev))); 911 DBG(DBG_PROBE && DBG_PCI, printk("eata_pio: find_PCI, HBA at %s\n", pci_name(dev)));
903 if (pci_enable_device(dev)) 912 if (pci_enable_device(dev))
904 continue; 913 continue;
@@ -926,7 +935,7 @@ static void find_pio_PCI(struct get_conf *buf)
926 * eventually remove it from the EISA and ISA list 935 * eventually remove it from the EISA and ISA list
927 */ 936 */
928 937
929 if (!register_pio_HBA(base, buf)) { 938 if (!register_pio_HBA(base, buf, dev)) {
930 release_region(base, 9); 939 release_region(base, 9);
931 continue; 940 continue;
932 } 941 }
@@ -976,12 +985,12 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
976 printk("Registered HBAs:\n"); 985 printk("Registered HBAs:\n");
977 printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n"); 986 printk("HBA no. Boardtype: Revis: EATA: Bus: BaseIO: IRQ: Ch: ID: Pr:" " QS: SG: CPL:\n");
978 for (i = 1; i <= registered_HBAs; i++) { 987 for (i = 1; i <= registered_HBAs; i++) {
979 printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4x %2d %d %d %c" 988 printk("scsi%-2d: %.10s v%s 2.0%c %s %#.4lx %2d %d %d %c"
980 " %2d %2d %2d\n", 989 " %2d %2d %2d\n",
981 HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision, 990 HBA_ptr->host_no, SD(HBA_ptr)->name, SD(HBA_ptr)->revision,
982 SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ? 991 SD(HBA_ptr)->EATA_revision, (SD(HBA_ptr)->bustype == 'P') ?
983 "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ", 992 "PCI " : (SD(HBA_ptr)->bustype == 'E') ? "EISA" : "ISA ",
984 (uint) HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id, 993 HBA_ptr->base, HBA_ptr->irq, SD(HBA_ptr)->channel, HBA_ptr->this_id,
985 SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue, 994 SD(HBA_ptr)->primary ? 'Y' : 'N', HBA_ptr->can_queue,
986 HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun); 995 HBA_ptr->sg_tablesize, HBA_ptr->cmd_per_lun);
987 HBA_ptr = SD(HBA_ptr)->next; 996 HBA_ptr = SD(HBA_ptr)->next;
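The eata_pio hunks above swap the network byte-order macros (ntohs/ntohl) for the kernel's explicit big-endian helpers when decoding the HBA's configuration block. A minimal sketch of that pattern, assuming a hypothetical config struct with __be16/__be32 fields (names here are illustrative, not the driver's):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical on-wire config block: the HBA returns these fields big-endian. */
struct hba_conf {
	__be16 queuesiz;	/* queue depth reported by the firmware */
	__be32 len;		/* length of the returned config data   */
};

static u16 hba_queue_depth(const struct hba_conf *gc)
{
	/* be16_to_cpu() states the on-wire endianness explicitly; ntohs()
	 * happens to do the same conversion but documents the wrong intent. */
	return be16_to_cpu(gc->queuesiz);
}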
diff --git a/drivers/scsi/fcal.c b/drivers/scsi/fcal.c
index 7f891023aa15..c4e16c0775de 100644
--- a/drivers/scsi/fcal.c
+++ b/drivers/scsi/fcal.c
@@ -248,8 +248,7 @@ int fcal_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t of
248 if (scd->id == target) { 248 if (scd->id == target) {
249 SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ", 249 SPRINTF (" [AL-PA: %02x, Id: %02d, Port WWN: %08x%08x, Node WWN: %08x%08x] ",
250 alpa, target, u1[0], u1[1], u2[0], u2[1]); 250 alpa, target, u1[0], u1[1], u2[0], u2[1]);
251 SPRINTF ("%s ", (scd->type < MAX_SCSI_DEVICE_CODE) ? 251 SPRINTF ("%s ", scsi_device_type(scd->type));
252 scsi_device_types[(short) scd->type] : "Unknown device");
253 252
254 for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++) 253 for (j = 0; (j < 8) && (scd->vendor[j] >= 0x20); j++)
255 SPRINTF ("%c", scd->vendor[j]); 254 SPRINTF ("%c", scd->vendor[j]);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 67f1100f3103..cdd893bb4e28 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -811,7 +811,6 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
811 struct NCR5380_hostdata *hostdata; 811 struct NCR5380_hostdata *hostdata;
812#ifdef NCR5380_STATS 812#ifdef NCR5380_STATS
813 struct scsi_device *dev; 813 struct scsi_device *dev;
814 extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
815#endif 814#endif
816 815
817 NCR5380_setup(scsi_ptr); 816 NCR5380_setup(scsi_ptr);
@@ -851,7 +850,7 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
851 long tr = hostdata->time_read[dev->id] / HZ; 850 long tr = hostdata->time_read[dev->id] / HZ;
852 long tw = hostdata->time_write[dev->id] / HZ; 851 long tw = hostdata->time_write[dev->id] / HZ;
853 852
854 PRINTP(" T:%d %s " ANDP dev->id ANDP(dev->type < MAX_SCSI_DEVICE_CODE) ? scsi_device_types[(int) dev->type] : "Unknown"); 853 PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
855 for (i = 0; i < 8; i++) 854 for (i = 0; i < 8; i++)
856 if (dev->vendor[i] >= 0x20) 855 if (dev->vendor[i] >= 0x20)
857 *(buffer + (len++)) = dev->vendor[i]; 856 *(buffer + (len++)) = dev->vendor[i];
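Both the fcal and g_NCR5380 hunks drop the open-coded scsi_device_types[] lookup with its MAX_SCSI_DEVICE_CODE bounds check in favour of the shared scsi_device_type() helper, which returns a printable name for any peripheral type code. A small, hypothetical usage sketch:

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Hypothetical helper: log a device's peripheral type by name.
 * scsi_device_type() already handles out-of-range codes, so no
 * bounds check is needed at the call site. */
static void report_device_type(struct scsi_device *sdev)
{
	printk(KERN_INFO "scsi target %u: %s\n",
	       sdev->id, scsi_device_type(sdev->type));
}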
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index a0d831b1bada..18dbe5c27dac 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -47,7 +47,7 @@ void gvp11_setup (char *str, int *ints)
47 gvp11_xfer_mask = ints[1]; 47 gvp11_xfer_mask = ints[1];
48} 48}
49 49
50static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 50static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
51{ 51{
52 unsigned short cntr = GVP11_DMAC_INT_ENABLE; 52 unsigned short cntr = GVP11_DMAC_INT_ENABLE;
53 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 53 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -142,8 +142,8 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
142 return 0; 142 return 0;
143} 143}
144 144
145static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 145static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
146 int status) 146 int status)
147{ 147{
148 /* stop DMA */ 148 /* stop DMA */
149 DMA(instance)->SP_DMA = 1; 149 DMA(instance)->SP_DMA = 1;
@@ -341,7 +341,7 @@ release:
341 return num_gvp11; 341 return num_gvp11;
342} 342}
343 343
344static int gvp11_bus_reset(Scsi_Cmnd *cmd) 344static int gvp11_bus_reset(struct scsi_cmnd *cmd)
345{ 345{
346 /* FIXME perform bus-specific reset */ 346 /* FIXME perform bus-specific reset */
347 347
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index 575d219d14ba..bf22859a5035 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -13,10 +13,6 @@
13 13
14int gvp11_detect(struct scsi_host_template *); 14int gvp11_detect(struct scsi_host_template *);
15int gvp11_release(struct Scsi_Host *); 15int gvp11_release(struct Scsi_Host *);
16const char *wd33c93_info(void);
17int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
18int wd33c93_abort(Scsi_Cmnd *);
19int wd33c93_reset(Scsi_Cmnd *, unsigned int);
20 16
21#ifndef CMD_PER_LUN 17#ifndef CMD_PER_LUN
22#define CMD_PER_LUN 2 18#define CMD_PER_LUN 2
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index dfcb96f3e60c..68ef1636678d 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -265,6 +265,9 @@ static void scsi_host_dev_release(struct device *dev)
265 destroy_workqueue(shost->work_q); 265 destroy_workqueue(shost->work_q);
266 266
267 scsi_destroy_command_freelist(shost); 267 scsi_destroy_command_freelist(shost);
268 if (shost->bqt)
269 blk_free_tags(shost->bqt);
270
268 kfree(shost->shost_data); 271 kfree(shost->shost_data);
269 272
270 if (parent) 273 if (parent)
@@ -487,7 +490,9 @@ EXPORT_SYMBOL(scsi_is_host_device);
487 * @work: Work to queue for execution. 490 * @work: Work to queue for execution.
488 * 491 *
489 * Return value: 492 * Return value:
490 * 0 on success / != 0 for error 493 * 1 - work queued for execution
494 * 0 - work is already queued
495 * -EINVAL - work queue doesn't exist
491 **/ 496 **/
492int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work) 497int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
493{ 498{
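The reworked scsi_queue_work() comment spells out the three possible return values. A hedged sketch of a caller that distinguishes them (the surrounding function is hypothetical, only scsi_queue_work() itself is from the hunk):

#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

/* Hypothetical caller, for illustration: kick a piece of deferred work on
 * the host's work queue and react to the three documented return values. */
static void my_lld_queue_recovery(struct Scsi_Host *shost,
				  struct work_struct *work)
{
	int ret = scsi_queue_work(shost, work);

	if (ret == 1)
		return;			/* queued for execution */
	if (ret == 0)
		return;			/* already pending, nothing to do */

	/* -EINVAL: the host was registered without a work queue */
	printk(KERN_WARNING "host %d has no work queue\n", shost->host_no);
}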
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index bcb3444f1dcf..28bfb8f9f81d 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -15,7 +15,6 @@
15 * 15 *
16 * For more information, visit http://www.highpoint-tech.com 16 * For more information, visit http://www.highpoint-tech.com
17 */ 17 */
18#include <linux/config.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/types.h> 19#include <linux/types.h>
21#include <linux/string.h> 20#include <linux/string.h>
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index ed22b96580c6..01b8ac641eb8 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -156,8 +156,8 @@ static void gather_partition_info(void)
156{ 156{
157 struct device_node *rootdn; 157 struct device_node *rootdn;
158 158
159 char *ppartition_name; 159 const char *ppartition_name;
160 unsigned int *p_number_ptr; 160 const unsigned int *p_number_ptr;
161 161
162 /* Retrieve information about this partition */ 162 /* Retrieve information about this partition */
163 rootdn = find_path_device("/"); 163 rootdn = find_path_device("/");
@@ -165,14 +165,11 @@ static void gather_partition_info(void)
165 return; 165 return;
166 } 166 }
167 167
168 ppartition_name = 168 ppartition_name = get_property(rootdn, "ibm,partition-name", NULL);
169 get_property(rootdn, "ibm,partition-name", NULL);
170 if (ppartition_name) 169 if (ppartition_name)
171 strncpy(partition_name, ppartition_name, 170 strncpy(partition_name, ppartition_name,
172 sizeof(partition_name)); 171 sizeof(partition_name));
173 p_number_ptr = 172 p_number_ptr = get_property(rootdn, "ibm,partition-no", NULL);
174 (unsigned int *)get_property(rootdn, "ibm,partition-no",
175 NULL);
176 if (p_number_ptr) 173 if (p_number_ptr)
177 partition_number = *p_number_ptr; 174 partition_number = *p_number_ptr;
178} 175}
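The rpa_vscsi change is purely about const-correctness: get_property() hands back firmware-owned data, so the hunk stores the results in const pointers and drops the casts. A minimal sketch of the same pattern, assuming a hypothetical destination buffer (the property name and calls mirror the hunk):

#include <linux/string.h>
#include <asm/prom.h>

static char my_partition_name[97];	/* hypothetical destination buffer */

static void my_read_partition_name(void)
{
	struct device_node *rootdn = find_path_device("/");
	const char *name;

	if (!rootdn)
		return;

	/* The property lives in firmware-owned memory: keep it const and
	 * copy it out rather than modifying it in place. */
	name = get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(my_partition_name, name, sizeof(my_partition_name) - 1);
}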
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 01080b3acf5e..7ed4eef8347b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -175,6 +175,8 @@ struct ipr_error_table_t ipr_error_table[] = {
175 "Qualified success"}, 175 "Qualified success"},
176 {0x01080000, 1, 1, 176 {0x01080000, 1, 1,
177 "FFFE: Soft device bus error recovered by the IOA"}, 177 "FFFE: Soft device bus error recovered by the IOA"},
178 {0x01088100, 0, 1,
179 "4101: Soft device bus fabric error"},
178 {0x01170600, 0, 1, 180 {0x01170600, 0, 1,
179 "FFF9: Device sector reassign successful"}, 181 "FFF9: Device sector reassign successful"},
180 {0x01170900, 0, 1, 182 {0x01170900, 0, 1,
@@ -225,6 +227,8 @@ struct ipr_error_table_t ipr_error_table[] = {
225 "3109: IOA timed out a device command"}, 227 "3109: IOA timed out a device command"},
226 {0x04088000, 0, 0, 228 {0x04088000, 0, 0,
227 "3120: SCSI bus is not operational"}, 229 "3120: SCSI bus is not operational"},
230 {0x04088100, 0, 1,
231 "4100: Hard device bus fabric error"},
228 {0x04118000, 0, 1, 232 {0x04118000, 0, 1,
229 "9000: IOA reserved area data check"}, 233 "9000: IOA reserved area data check"},
230 {0x04118100, 0, 1, 234 {0x04118100, 0, 1,
@@ -273,6 +277,14 @@ struct ipr_error_table_t ipr_error_table[] = {
273 "9091: Incorrect hardware configuration change has been detected"}, 277 "9091: Incorrect hardware configuration change has been detected"},
274 {0x04678000, 0, 1, 278 {0x04678000, 0, 1,
275 "9073: Invalid multi-adapter configuration"}, 279 "9073: Invalid multi-adapter configuration"},
280 {0x04678100, 0, 1,
281 "4010: Incorrect connection between cascaded expanders"},
282 {0x04678200, 0, 1,
283 "4020: Connections exceed IOA design limits"},
284 {0x04678300, 0, 1,
285 "4030: Incorrect multipath connection"},
286 {0x04679000, 0, 1,
287 "4110: Unsupported enclosure function"},
276 {0x046E0000, 0, 1, 288 {0x046E0000, 0, 1,
277 "FFF4: Command to logical unit failed"}, 289 "FFF4: Command to logical unit failed"},
278 {0x05240000, 1, 0, 290 {0x05240000, 1, 0,
@@ -297,6 +309,8 @@ struct ipr_error_table_t ipr_error_table[] = {
297 "9031: Array protection temporarily suspended, protection resuming"}, 309 "9031: Array protection temporarily suspended, protection resuming"},
298 {0x06040600, 0, 1, 310 {0x06040600, 0, 1,
299 "9040: Array protection temporarily suspended, protection resuming"}, 311 "9040: Array protection temporarily suspended, protection resuming"},
312 {0x06288000, 0, 1,
313 "3140: Device bus not ready to ready transition"},
300 {0x06290000, 0, 1, 314 {0x06290000, 0, 1,
301 "FFFB: SCSI bus was reset"}, 315 "FFFB: SCSI bus was reset"},
302 {0x06290500, 0, 0, 316 {0x06290500, 0, 0,
@@ -319,6 +333,16 @@ struct ipr_error_table_t ipr_error_table[] = {
319 "3150: SCSI bus configuration error"}, 333 "3150: SCSI bus configuration error"},
320 {0x06678100, 0, 1, 334 {0x06678100, 0, 1,
321 "9074: Asymmetric advanced function disk configuration"}, 335 "9074: Asymmetric advanced function disk configuration"},
336 {0x06678300, 0, 1,
337 "4040: Incomplete multipath connection between IOA and enclosure"},
338 {0x06678400, 0, 1,
339 "4041: Incomplete multipath connection between enclosure and device"},
340 {0x06678500, 0, 1,
341 "9075: Incomplete multipath connection between IOA and remote IOA"},
342 {0x06678600, 0, 1,
343 "9076: Configuration error, missing remote IOA"},
344 {0x06679100, 0, 1,
345 "4050: Enclosure does not support a required multipath function"},
322 {0x06690200, 0, 1, 346 {0x06690200, 0, 1,
323 "9041: Array protection temporarily suspended"}, 347 "9041: Array protection temporarily suspended"},
324 {0x06698200, 0, 1, 348 {0x06698200, 0, 1,
@@ -331,6 +355,10 @@ struct ipr_error_table_t ipr_error_table[] = {
331 "9072: Link not operational transition"}, 355 "9072: Link not operational transition"},
332 {0x066B8200, 0, 1, 356 {0x066B8200, 0, 1,
333 "9032: Array exposed but still protected"}, 357 "9032: Array exposed but still protected"},
358 {0x066B9100, 0, 1,
359 "4061: Multipath redundancy level got better"},
360 {0x066B9200, 0, 1,
361 "4060: Multipath redundancy level got worse"},
334 {0x07270000, 0, 0, 362 {0x07270000, 0, 0,
335 "Failure due to other device"}, 363 "Failure due to other device"},
336 {0x07278000, 0, 1, 364 {0x07278000, 0, 1,
@@ -4099,8 +4127,7 @@ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4099{ 4127{
4100 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 4128 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4101 4129
4102 if ((be32_to_cpu(ioasa->ioasc_specific) & 4130 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4103 (IPR_ADDITIONAL_STATUS_FMT | IPR_AUTOSENSE_VALID)) == 0)
4104 return 0; 4131 return 0;
4105 4132
4106 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 4133 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
@@ -4190,7 +4217,8 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4190 case IPR_IOASC_NR_INIT_CMD_REQUIRED: 4217 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4191 break; 4218 break;
4192 default: 4219 default:
4193 scsi_cmd->result |= (DID_ERROR << 16); 4220 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4221 scsi_cmd->result |= (DID_ERROR << 16);
4194 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res)) 4222 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4195 res->needs_sync_complete = 1; 4223 res->needs_sync_complete = 1;
4196 break; 4224 break;
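The ipr_get_autosense() hunk narrows the test to the single flag that matters: sense data is only copied when IPR_AUTOSENSE_VALID is set in the (big-endian) ioasc_specific word. A small, hypothetical sketch of the same guard, using the flag value visible in the ipr.h hunk below:

#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_AUTOSENSE_VALID 0x40000000	/* mirrors IPR_AUTOSENSE_VALID */

/* Hypothetical guard: only trust the sense buffer when the adapter
 * says it filled one in. The status word arrives big-endian. */
static int my_autosense_present(__be32 ioasc_specific)
{
	return (be32_to_cpu(ioasc_specific) & MY_AUTOSENSE_VALID) != 0;
}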
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 1ad24df69d70..11eaff524327 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -36,8 +36,8 @@
36/* 36/*
37 * Literals 37 * Literals
38 */ 38 */
39#define IPR_DRIVER_VERSION "2.1.3" 39#define IPR_DRIVER_VERSION "2.1.4"
40#define IPR_DRIVER_DATE "(March 29, 2006)" 40#define IPR_DRIVER_DATE "(August 2, 2006)"
41 41
42/* 42/*
43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 43 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -45,6 +45,7 @@
45 * This can be adjusted at runtime through sysfs device attributes. 45 * This can be adjusted at runtime through sysfs device attributes.
46 */ 46 */
47#define IPR_MAX_CMD_PER_LUN 6 47#define IPR_MAX_CMD_PER_LUN 6
48#define IPR_MAX_CMD_PER_ATA_LUN 1
48 49
49/* 50/*
50 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of 51 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
@@ -106,7 +107,7 @@
106#define IPR_IOA_BUS 0xff 107#define IPR_IOA_BUS 0xff
107#define IPR_IOA_TARGET 0xff 108#define IPR_IOA_TARGET 0xff
108#define IPR_IOA_LUN 0xff 109#define IPR_IOA_LUN 0xff
109#define IPR_MAX_NUM_BUSES 8 110#define IPR_MAX_NUM_BUSES 16
110#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES 111#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES
111 112
112#define IPR_NUM_RESET_RELOAD_RETRIES 3 113#define IPR_NUM_RESET_RELOAD_RETRIES 3
@@ -145,6 +146,7 @@
145#define IPR_LUN_RESET 0x40 146#define IPR_LUN_RESET 0x40
146#define IPR_TARGET_RESET 0x20 147#define IPR_TARGET_RESET 0x20
147#define IPR_BUS_RESET 0x10 148#define IPR_BUS_RESET 0x10
149#define IPR_ATA_PHY_RESET 0x80
148#define IPR_ID_HOST_RR_Q 0xC4 150#define IPR_ID_HOST_RR_Q 0xC4
149#define IPR_QUERY_IOA_CONFIG 0xC5 151#define IPR_QUERY_IOA_CONFIG 0xC5
150#define IPR_CANCEL_ALL_REQUESTS 0xCE 152#define IPR_CANCEL_ALL_REQUESTS 0xCE
@@ -295,7 +297,11 @@ struct ipr_std_inq_data {
295}__attribute__ ((packed)); 297}__attribute__ ((packed));
296 298
297struct ipr_config_table_entry { 299struct ipr_config_table_entry {
298 u8 service_level; 300 u8 proto;
301#define IPR_PROTO_SATA 0x02
302#define IPR_PROTO_SATA_ATAPI 0x03
303#define IPR_PROTO_SAS_STP 0x06
304#define IPR_PROTO_SAS_STP_ATAPI 0x07
299 u8 array_id; 305 u8 array_id;
300 u8 flags; 306 u8 flags;
301#define IPR_IS_IOA_RESOURCE 0x80 307#define IPR_IS_IOA_RESOURCE 0x80
@@ -307,6 +313,7 @@ struct ipr_config_table_entry {
307#define IPR_SUBTYPE_AF_DASD 0 313#define IPR_SUBTYPE_AF_DASD 0
308#define IPR_SUBTYPE_GENERIC_SCSI 1 314#define IPR_SUBTYPE_GENERIC_SCSI 1
309#define IPR_SUBTYPE_VOLUME_SET 2 315#define IPR_SUBTYPE_VOLUME_SET 2
316#define IPR_SUBTYPE_GENERIC_ATA 4
310 317
311#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4) 318#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4)
312#define IPR_QUEUE_FROZEN_MODEL 0 319#define IPR_QUEUE_FROZEN_MODEL 0
@@ -350,6 +357,7 @@ struct ipr_cmd_pkt {
350#define IPR_RQTYPE_SCSICDB 0x00 357#define IPR_RQTYPE_SCSICDB 0x00
351#define IPR_RQTYPE_IOACMD 0x01 358#define IPR_RQTYPE_IOACMD 0x01
352#define IPR_RQTYPE_HCAM 0x02 359#define IPR_RQTYPE_HCAM 0x02
360#define IPR_RQTYPE_ATA_PASSTHRU 0x04
353 361
354 u8 luntar_luntrn; 362 u8 luntar_luntrn;
355 363
@@ -373,6 +381,37 @@ struct ipr_cmd_pkt {
373 __be16 timeout; 381 __be16 timeout;
374}__attribute__ ((packed, aligned(4))); 382}__attribute__ ((packed, aligned(4)));
375 383
384struct ipr_ioarcb_ata_regs {
385 u8 flags;
386#define IPR_ATA_FLAG_PACKET_CMD 0x80
387#define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40
388#define IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION 0x20
389 u8 reserved[3];
390
391 __be16 data;
392 u8 feature;
393 u8 nsect;
394 u8 lbal;
395 u8 lbam;
396 u8 lbah;
397 u8 device;
398 u8 command;
399 u8 reserved2[3];
400 u8 hob_feature;
401 u8 hob_nsect;
402 u8 hob_lbal;
403 u8 hob_lbam;
404 u8 hob_lbah;
405 u8 ctl;
406}__attribute__ ((packed, aligned(4)));
407
408struct ipr_ioarcb_add_data {
409 union {
410 struct ipr_ioarcb_ata_regs regs;
411 __be32 add_cmd_parms[10];
412 }u;
413}__attribute__ ((packed, aligned(4)));
414
376/* IOA Request Control Block 128 bytes */ 415/* IOA Request Control Block 128 bytes */
377struct ipr_ioarcb { 416struct ipr_ioarcb {
378 __be32 ioarcb_host_pci_addr; 417 __be32 ioarcb_host_pci_addr;
@@ -397,7 +436,7 @@ struct ipr_ioarcb {
397 struct ipr_cmd_pkt cmd_pkt; 436 struct ipr_cmd_pkt cmd_pkt;
398 437
399 __be32 add_cmd_parms_len; 438 __be32 add_cmd_parms_len;
400 __be32 add_cmd_parms[10]; 439 struct ipr_ioarcb_add_data add_data;
401}__attribute__((packed, aligned (4))); 440}__attribute__((packed, aligned (4)));
402 441
403struct ipr_ioadl_desc { 442struct ipr_ioadl_desc {
@@ -433,6 +472,21 @@ struct ipr_ioasa_gpdd {
433 __be32 ioa_data[2]; 472 __be32 ioa_data[2];
434}__attribute__((packed, aligned (4))); 473}__attribute__((packed, aligned (4)));
435 474
475struct ipr_ioasa_gata {
476 u8 error;
477 u8 nsect; /* Interrupt reason */
478 u8 lbal;
479 u8 lbam;
480 u8 lbah;
481 u8 device;
482 u8 status;
483 u8 alt_status; /* ATA CTL */
484 u8 hob_nsect;
485 u8 hob_lbal;
486 u8 hob_lbam;
487 u8 hob_lbah;
488}__attribute__((packed, aligned (4)));
489
436struct ipr_auto_sense { 490struct ipr_auto_sense {
437 __be16 auto_sense_len; 491 __be16 auto_sense_len;
438 __be16 ioa_data_len; 492 __be16 ioa_data_len;
@@ -466,6 +520,7 @@ struct ipr_ioasa {
466 __be32 ioasc_specific; /* status code specific field */ 520 __be32 ioasc_specific; /* status code specific field */
467#define IPR_ADDITIONAL_STATUS_FMT 0x80000000 521#define IPR_ADDITIONAL_STATUS_FMT 0x80000000
468#define IPR_AUTOSENSE_VALID 0x40000000 522#define IPR_AUTOSENSE_VALID 0x40000000
523#define IPR_ATA_DEVICE_WAS_RESET 0x20000000
469#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff 524#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff
470#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) 525#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
471#define IPR_FIELD_POINTER_MASK 0x0000ffff 526#define IPR_FIELD_POINTER_MASK 0x0000ffff
@@ -474,6 +529,7 @@ struct ipr_ioasa {
474 struct ipr_ioasa_vset vset; 529 struct ipr_ioasa_vset vset;
475 struct ipr_ioasa_af_dasd dasd; 530 struct ipr_ioasa_af_dasd dasd;
476 struct ipr_ioasa_gpdd gpdd; 531 struct ipr_ioasa_gpdd gpdd;
532 struct ipr_ioasa_gata gata;
477 } u; 533 } u;
478 534
479 struct ipr_auto_sense auto_sense; 535 struct ipr_auto_sense auto_sense;
@@ -1308,6 +1364,22 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
1308} 1364}
1309 1365
1310/** 1366/**
1367 * ipr_is_gata - Determine if a resource is a generic ATA resource
1368 * @res: resource entry struct
1369 *
1370 * Return value:
1371 * 1 if GATA / 0 if not GATA
1372 **/
1373static inline int ipr_is_gata(struct ipr_resource_entry *res)
1374{
1375 if (!ipr_is_ioa_resource(res) &&
1376 IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
1377 return 1;
1378 else
1379 return 0;
1380}
1381
1382/**
1311 * ipr_is_naca_model - Determine if a resource is using NACA queueing model 1383 * ipr_is_naca_model - Determine if a resource is using NACA queueing model
1312 * @res: resource entry struct 1384 * @res: resource entry struct
1313 * 1385 *
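The new ipr.h plumbing (IPR_PROTO_* codes, the ATA register block in the IOARCB, the GATA IOASA, and the ipr_is_gata() predicate) lets the driver tell SATA/STP resources apart from ordinary SCSI ones. A hypothetical caller might branch on the predicate like this; only ipr_is_gata() and the IPR_RQTYPE_* values come from the hunk, the rest is illustrative:

#include "ipr.h"

/* Illustrative only: pick a request type based on the resource subtype,
 * the way a caller of ipr_is_gata() might. */
static u8 my_pick_request_type(struct ipr_resource_entry *res)
{
	if (ipr_is_gata(res))
		return IPR_RQTYPE_ATA_PASSTHRU;	/* 0x04, new in this patch */

	return IPR_RQTYPE_SCSICDB;		/* 0x00, plain SCSI CDB */
}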
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 058f094f945a..0a9dbc59663f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -108,8 +108,8 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
108{ 108{
109 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 109 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
110 110
111 crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); 111 crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
112 buf->sg.length += sizeof(uint32_t); 112 buf->sg.length = tcp_conn->hdr_size;
113} 113}
114 114
115static inline int 115static inline int
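From this hunk on, the file's digest handling moves from the old crypto_digest_* calls to the hash_desc based crypto_hash_* API. A minimal, self-contained sketch of computing a CRC32C digest that way (function and buffer names are hypothetical; the allocation flags match what this driver uses):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical one-shot CRC32C over a flat buffer using the crypto_hash API. */
static int my_crc32c_digest(void *buf, unsigned int len, u32 *crc)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int rc;

	desc.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = 0;

	sg_init_one(&sg, buf, len);
	rc = crypto_hash_digest(&desc, &sg, len, (u8 *)crc);

	crypto_free_hash(desc.tfm);
	return rc;
}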
@@ -281,7 +281,6 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
281{ 281{
282 struct iscsi_data *hdr; 282 struct iscsi_data *hdr;
283 struct scsi_cmnd *sc = ctask->sc; 283 struct scsi_cmnd *sc = ctask->sc;
284 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
285 284
286 hdr = &r2t->dtask.hdr; 285 hdr = &r2t->dtask.hdr;
287 memset(hdr, 0, sizeof(struct iscsi_data)); 286 memset(hdr, 0, sizeof(struct iscsi_data));
@@ -336,10 +335,12 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
336 sg_count += sg->length; 335 sg_count += sg->length;
337 } 336 }
338 BUG_ON(r2t->sg == NULL); 337 BUG_ON(r2t->sg == NULL);
339 } else 338 } else {
340 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 339 iscsi_buf_init_iov(&r2t->sendbuf,
341 (char*)sc->request_buffer + r2t->data_offset, 340 (char*)sc->request_buffer + r2t->data_offset,
342 r2t->data_count); 341 r2t->data_count);
342 r2t->sg = NULL;
343 }
343} 344}
344 345
345/** 346/**
@@ -358,8 +359,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
358 int r2tsn = be32_to_cpu(rhdr->r2tsn); 359 int r2tsn = be32_to_cpu(rhdr->r2tsn);
359 int rc; 360 int rc;
360 361
361 if (tcp_conn->in.datalen) 362 if (tcp_conn->in.datalen) {
363 printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
364 tcp_conn->in.datalen);
362 return ISCSI_ERR_DATALEN; 365 return ISCSI_ERR_DATALEN;
366 }
363 367
364 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn) 368 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
365 return ISCSI_ERR_R2TSN; 369 return ISCSI_ERR_R2TSN;
@@ -385,15 +389,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
385 389
386 r2t->exp_statsn = rhdr->statsn; 390 r2t->exp_statsn = rhdr->statsn;
387 r2t->data_length = be32_to_cpu(rhdr->data_length); 391 r2t->data_length = be32_to_cpu(rhdr->data_length);
388 if (r2t->data_length == 0 || 392 if (r2t->data_length == 0) {
389 r2t->data_length > session->max_burst) { 393 printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
390 spin_unlock(&session->lock); 394 spin_unlock(&session->lock);
391 return ISCSI_ERR_DATALEN; 395 return ISCSI_ERR_DATALEN;
392 } 396 }
393 397
398 if (r2t->data_length > session->max_burst)
399 debug_scsi("invalid R2T with data len %u and max burst %u."
400 "Attempting to execute request.\n",
401 r2t->data_length, session->max_burst);
402
394 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 403 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
395 if (r2t->data_offset + r2t->data_length > ctask->total_length) { 404 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
396 spin_unlock(&session->lock); 405 spin_unlock(&session->lock);
406 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
407 "offset %u and total length %d\n", r2t->data_length,
408 r2t->data_offset, ctask->total_length);
397 return ISCSI_ERR_DATALEN; 409 return ISCSI_ERR_DATALEN;
398 } 410 }
399 411
@@ -456,7 +468,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
456 468
457 sg_init_one(&sg, (u8 *)hdr, 469 sg_init_one(&sg, (u8 *)hdr,
458 sizeof(struct iscsi_hdr) + ahslen); 470 sizeof(struct iscsi_hdr) + ahslen);
459 crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst); 471 crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
472 (u8 *)&cdgst);
460 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + 473 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
461 ahslen); 474 ahslen);
462 if (cdgst != rdgst) { 475 if (cdgst != rdgst) {
@@ -492,7 +505,6 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
492 goto copy_hdr; 505 goto copy_hdr;
493 506
494 spin_lock(&session->lock); 507 spin_lock(&session->lock);
495 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
496 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 508 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
497 spin_unlock(&session->lock); 509 spin_unlock(&session->lock);
498 break; 510 break;
@@ -637,10 +649,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
637 * byte counters. 649 * byte counters.
638 **/ 650 **/
639static inline int 651static inline int
640iscsi_tcp_copy(struct iscsi_conn *conn) 652iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
641{ 653{
642 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 654 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
643 int buf_size = tcp_conn->in.datalen;
644 int buf_left = buf_size - tcp_conn->data_copied; 655 int buf_left = buf_size - tcp_conn->data_copied;
645 int size = min(tcp_conn->in.copy, buf_left); 656 int size = min(tcp_conn->in.copy, buf_left);
646 int rc; 657 int rc;
@@ -665,15 +676,15 @@ iscsi_tcp_copy(struct iscsi_conn *conn)
665} 676}
666 677
667static inline void 678static inline void
668partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, 679partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
669 struct scatterlist *sg, int offset, int length) 680 int offset, int length)
670{ 681{
671 struct scatterlist temp; 682 struct scatterlist temp;
672 683
673 memcpy(&temp, sg, sizeof(struct scatterlist)); 684 memcpy(&temp, sg, sizeof(struct scatterlist));
674 temp.offset = offset; 685 temp.offset = offset;
675 temp.length = length; 686 temp.length = length;
676 crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); 687 crypto_hash_update(desc, &temp, length);
677} 688}
678 689
679static void 690static void
@@ -682,7 +693,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
682 struct scatterlist tmp; 693 struct scatterlist tmp;
683 694
684 sg_init_one(&tmp, buf, len); 695 sg_init_one(&tmp, buf, len);
685 crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); 696 crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
686} 697}
687 698
688static int iscsi_scsi_data_in(struct iscsi_conn *conn) 699static int iscsi_scsi_data_in(struct iscsi_conn *conn)
@@ -736,11 +747,12 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
736 if (!rc) { 747 if (!rc) {
737 if (conn->datadgst_en) { 748 if (conn->datadgst_en) {
738 if (!offset) 749 if (!offset)
739 crypto_digest_update( 750 crypto_hash_update(
740 tcp_conn->data_rx_tfm, 751 &tcp_conn->rx_hash,
741 &sg[i], 1); 752 &sg[i], 1);
742 else 753 else
743 partial_sg_digest_update(tcp_conn, 754 partial_sg_digest_update(
755 &tcp_conn->rx_hash,
744 &sg[i], 756 &sg[i],
745 sg[i].offset + offset, 757 sg[i].offset + offset,
746 sg[i].length - offset); 758 sg[i].length - offset);
@@ -754,8 +766,10 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
754 /* 766 /*
755 * data-in is complete, but buffer not... 767 * data-in is complete, but buffer not...
756 */ 768 */
757 partial_sg_digest_update(tcp_conn, &sg[i], 769 partial_sg_digest_update(&tcp_conn->rx_hash,
758 sg[i].offset, sg[i].length-rc); 770 &sg[i],
771 sg[i].offset,
772 sg[i].length-rc);
759 rc = 0; 773 rc = 0;
760 break; 774 break;
761 } 775 }
@@ -772,7 +786,6 @@ done:
772 (long)sc, sc->result, ctask->itt, 786 (long)sc, sc->result, ctask->itt,
773 tcp_conn->in.hdr->flags); 787 tcp_conn->in.hdr->flags);
774 spin_lock(&conn->session->lock); 788 spin_lock(&conn->session->lock);
775 iscsi_tcp_cleanup_ctask(conn, ctask);
776 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); 789 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
777 spin_unlock(&conn->session->lock); 790 spin_unlock(&conn->session->lock);
778 } 791 }
@@ -792,9 +805,6 @@ iscsi_data_recv(struct iscsi_conn *conn)
792 rc = iscsi_scsi_data_in(conn); 805 rc = iscsi_scsi_data_in(conn);
793 break; 806 break;
794 case ISCSI_OP_SCSI_CMD_RSP: 807 case ISCSI_OP_SCSI_CMD_RSP:
795 spin_lock(&conn->session->lock);
796 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
797 spin_unlock(&conn->session->lock);
798 case ISCSI_OP_TEXT_RSP: 808 case ISCSI_OP_TEXT_RSP:
799 case ISCSI_OP_LOGIN_RSP: 809 case ISCSI_OP_LOGIN_RSP:
800 case ISCSI_OP_ASYNC_EVENT: 810 case ISCSI_OP_ASYNC_EVENT:
@@ -803,7 +813,7 @@ iscsi_data_recv(struct iscsi_conn *conn)
803 * Collect data segment to the connection's data 813 * Collect data segment to the connection's data
804 * placeholder 814 * placeholder
805 */ 815 */
806 if (iscsi_tcp_copy(conn)) { 816 if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
807 rc = -EAGAIN; 817 rc = -EAGAIN;
808 goto exit; 818 goto exit;
809 } 819 }
@@ -876,10 +886,8 @@ more:
876 */ 886 */
877 rc = iscsi_tcp_hdr_recv(conn); 887 rc = iscsi_tcp_hdr_recv(conn);
878 if (!rc && tcp_conn->in.datalen) { 888 if (!rc && tcp_conn->in.datalen) {
879 if (conn->datadgst_en) { 889 if (conn->datadgst_en)
880 BUG_ON(!tcp_conn->data_rx_tfm); 890 crypto_hash_init(&tcp_conn->rx_hash);
881 crypto_digest_init(tcp_conn->data_rx_tfm);
882 }
883 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; 891 tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
884 } else if (rc) { 892 } else if (rc) {
885 iscsi_conn_failure(conn, rc); 893 iscsi_conn_failure(conn, rc);
@@ -892,10 +900,15 @@ more:
892 900
893 debug_tcp("extra data_recv offset %d copy %d\n", 901 debug_tcp("extra data_recv offset %d copy %d\n",
894 tcp_conn->in.offset, tcp_conn->in.copy); 902 tcp_conn->in.offset, tcp_conn->in.copy);
895 skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 903 rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
896 &recv_digest, 4); 904 if (rc) {
897 tcp_conn->in.offset += 4; 905 if (rc == -EAGAIN)
898 tcp_conn->in.copy -= 4; 906 goto again;
907 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
908 return 0;
909 }
910
911 memcpy(&recv_digest, conn->data, sizeof(uint32_t));
899 if (recv_digest != tcp_conn->in.datadgst) { 912 if (recv_digest != tcp_conn->in.datadgst) {
900 debug_tcp("iscsi_tcp: data digest error!" 913 debug_tcp("iscsi_tcp: data digest error!"
901 "0x%x != 0x%x\n", recv_digest, 914 "0x%x != 0x%x\n", recv_digest,
@@ -931,13 +944,14 @@ more:
931 tcp_conn->in.padding); 944 tcp_conn->in.padding);
932 memset(pad, 0, tcp_conn->in.padding); 945 memset(pad, 0, tcp_conn->in.padding);
933 sg_init_one(&sg, pad, tcp_conn->in.padding); 946 sg_init_one(&sg, pad, tcp_conn->in.padding);
934 crypto_digest_update(tcp_conn->data_rx_tfm, 947 crypto_hash_update(&tcp_conn->rx_hash,
935 &sg, 1); 948 &sg, sg.length);
936 } 949 }
937 crypto_digest_final(tcp_conn->data_rx_tfm, 950 crypto_hash_final(&tcp_conn->rx_hash,
938 (u8 *) & tcp_conn->in.datadgst); 951 (u8 *) &tcp_conn->in.datadgst);
939 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); 952 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
940 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; 953 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
954 tcp_conn->data_copied = 0;
941 } else 955 } else
942 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 956 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
943 } 957 }
@@ -1177,37 +1191,12 @@ iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1177 1191
1178static inline void 1192static inline void
1179iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, 1193iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
1180 struct iscsi_cmd_task *ctask) 1194 struct iscsi_tcp_cmd_task *tcp_ctask)
1181{ 1195{
1182 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1196 crypto_hash_init(&tcp_conn->tx_hash);
1183
1184 BUG_ON(!tcp_conn->data_tx_tfm);
1185 crypto_digest_init(tcp_conn->data_tx_tfm);
1186 tcp_ctask->digest_count = 4; 1197 tcp_ctask->digest_count = 4;
1187} 1198}
1188 1199
1189static int
1190iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1191 struct iscsi_buf *buf, uint32_t *digest, int final)
1192{
1193 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1194 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1195 int rc = 0;
1196 int sent = 0;
1197
1198 if (final)
1199 crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest);
1200
1201 iscsi_buf_init_iov(buf, (char*)digest, 4);
1202 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1203 if (rc) {
1204 tcp_ctask->datadigest = *digest;
1205 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST;
1206 } else
1207 tcp_ctask->digest_count = 4;
1208 return rc;
1209}
1210
1211/** 1200/**
1212 * iscsi_solicit_data_cont - initialize next Data-Out 1201 * iscsi_solicit_data_cont - initialize next Data-Out
1213 * @conn: iscsi connection 1202 * @conn: iscsi connection
@@ -1225,7 +1214,6 @@ static void
1225iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1214iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1226 struct iscsi_r2t_info *r2t, int left) 1215 struct iscsi_r2t_info *r2t, int left)
1227{ 1216{
1228 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1229 struct iscsi_data *hdr; 1217 struct iscsi_data *hdr;
1230 struct scsi_cmnd *sc = ctask->sc; 1218 struct scsi_cmnd *sc = ctask->sc;
1231 int new_offset; 1219 int new_offset;
@@ -1254,27 +1242,30 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1254 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, 1242 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
1255 sizeof(struct iscsi_hdr)); 1243 sizeof(struct iscsi_hdr));
1256 1244
1257 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) { 1245 if (iscsi_buf_left(&r2t->sendbuf))
1258 BUG_ON(tcp_ctask->bad_sg == r2t->sg); 1246 return;
1247
1248 if (sc->use_sg) {
1259 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); 1249 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1260 r2t->sg += 1; 1250 r2t->sg += 1;
1261 } else 1251 } else {
1262 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1252 iscsi_buf_init_iov(&r2t->sendbuf,
1263 (char*)sc->request_buffer + new_offset, 1253 (char*)sc->request_buffer + new_offset,
1264 r2t->data_count); 1254 r2t->data_count);
1255 r2t->sg = NULL;
1256 }
1265} 1257}
1266 1258
1267static void 1259static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
1268iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1260 unsigned long len)
1269{ 1261{
1270 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1262 tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
1271 struct iscsi_data_task *dtask; 1263 if (!tcp_ctask->pad_count)
1264 return;
1272 1265
1273 dtask = tcp_ctask->dtask = &tcp_ctask->unsol_dtask; 1266 tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
1274 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr, 1267 debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
1275 tcp_ctask->r2t_data_count); 1268 tcp_ctask->xmstate |= XMSTATE_W_PAD;
1276 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1277 sizeof(struct iscsi_hdr));
1278} 1269}
1279 1270
1280/** 1271/**
@@ -1302,38 +1293,20 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1302 if (sc->use_sg) { 1293 if (sc->use_sg) {
1303 struct scatterlist *sg = sc->request_buffer; 1294 struct scatterlist *sg = sc->request_buffer;
1304 1295
1305 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1296 iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
1306 &sg[tcp_ctask->sg_count++]); 1297 tcp_ctask->sg = sg + 1;
1307 tcp_ctask->sg = sg;
1308 tcp_ctask->bad_sg = sg + sc->use_sg; 1298 tcp_ctask->bad_sg = sg + sc->use_sg;
1309 } else 1299 } else {
1310 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1300 iscsi_buf_init_iov(&tcp_ctask->sendbuf,
1311 sc->request_buffer, 1301 sc->request_buffer,
1312 sc->request_bufflen); 1302 sc->request_bufflen);
1313 1303 tcp_ctask->sg = NULL;
1314 if (ctask->imm_count) 1304 tcp_ctask->bad_sg = NULL;
1315 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1316
1317 tcp_ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1318 if (tcp_ctask->pad_count) {
1319 tcp_ctask->pad_count = ISCSI_PAD_LEN -
1320 tcp_ctask->pad_count;
1321 debug_scsi("write padding %d bytes\n",
1322 tcp_ctask->pad_count);
1323 tcp_ctask->xmstate |= XMSTATE_W_PAD;
1324 } 1305 }
1325 1306 debug_scsi("cmd [itt 0x%x total %d imm_data %d "
1326 if (ctask->unsol_count) 1307 "unsol count %d, unsol offset %d]\n",
1327 tcp_ctask->xmstate |= XMSTATE_UNS_HDR |
1328 XMSTATE_UNS_INIT;
1329 tcp_ctask->r2t_data_count = ctask->total_length -
1330 ctask->imm_count -
1331 ctask->unsol_count;
1332
1333 debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
1334 "r2t_data %d]\n",
1335 ctask->itt, ctask->total_length, ctask->imm_count, 1308 ctask->itt, ctask->total_length, ctask->imm_count,
1336 ctask->unsol_count, tcp_ctask->r2t_data_count); 1309 ctask->unsol_count, ctask->unsol_offset);
1337 } else 1310 } else
1338 tcp_ctask->xmstate = XMSTATE_R_HDR; 1311 tcp_ctask->xmstate = XMSTATE_R_HDR;
1339 1312
@@ -1415,8 +1388,8 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1415} 1388}
1416 1389
1417static inline int 1390static inline int
1418handle_xmstate_r_hdr(struct iscsi_conn *conn, 1391iscsi_send_read_hdr(struct iscsi_conn *conn,
1419 struct iscsi_tcp_cmd_task *tcp_ctask) 1392 struct iscsi_tcp_cmd_task *tcp_ctask)
1420{ 1393{
1421 int rc; 1394 int rc;
1422 1395
@@ -1434,7 +1407,7 @@ handle_xmstate_r_hdr(struct iscsi_conn *conn,
1434} 1407}
1435 1408
1436static inline int 1409static inline int
1437handle_xmstate_w_hdr(struct iscsi_conn *conn, 1410iscsi_send_write_hdr(struct iscsi_conn *conn,
1438 struct iscsi_cmd_task *ctask) 1411 struct iscsi_cmd_task *ctask)
1439{ 1412{
1440 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1413 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
@@ -1445,85 +1418,126 @@ handle_xmstate_w_hdr(struct iscsi_conn *conn,
1445 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1418 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1446 (u8*)tcp_ctask->hdrext); 1419 (u8*)tcp_ctask->hdrext);
1447 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); 1420 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
1448 if (rc) 1421 if (rc) {
1449 tcp_ctask->xmstate |= XMSTATE_W_HDR; 1422 tcp_ctask->xmstate |= XMSTATE_W_HDR;
1450 return rc; 1423 return rc;
1424 }
1425
1426 if (ctask->imm_count) {
1427 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1428 iscsi_set_padding(tcp_ctask, ctask->imm_count);
1429
1430 if (ctask->conn->datadgst_en) {
1431 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1432 tcp_ctask->immdigest = 0;
1433 }
1434 }
1435
1436 if (ctask->unsol_count)
1437 tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1438 return 0;
1451} 1439}
1452 1440
1453static inline int 1441static int
1454handle_xmstate_data_digest(struct iscsi_conn *conn, 1442iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1455 struct iscsi_cmd_task *ctask)
1456{ 1443{
1457 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1444 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1458 int rc; 1445 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1446 int sent = 0, rc;
1459 1447
1460 tcp_ctask->xmstate &= ~XMSTATE_DATA_DIGEST; 1448 if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
1461 debug_tcp("resent data digest 0x%x\n", tcp_ctask->datadigest); 1449 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
1462 rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, 1450 tcp_ctask->pad_count);
1463 &tcp_ctask->datadigest, 0); 1451 if (conn->datadgst_en)
1452 crypto_hash_update(&tcp_conn->tx_hash,
1453 &tcp_ctask->sendbuf.sg,
1454 tcp_ctask->sendbuf.sg.length);
1455 } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
1456 return 0;
1457
1458 tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
1459 tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
1460 debug_scsi("sending %d pad bytes for itt 0x%x\n",
1461 tcp_ctask->pad_count, ctask->itt);
1462 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
1463 &sent);
1464 if (rc) { 1464 if (rc) {
1465 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST; 1465 debug_scsi("padding send failed %d\n", rc);
1466 debug_tcp("resent data digest 0x%x fail!\n", 1466 tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
1467 tcp_ctask->datadigest);
1468 } 1467 }
1469
1470 return rc; 1468 return rc;
1471} 1469}
1472 1470
1473static inline int 1471static int
1474handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1472iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1473 struct iscsi_buf *buf, uint32_t *digest)
1475{ 1474{
1476 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1475 struct iscsi_tcp_cmd_task *tcp_ctask;
1477 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1476 struct iscsi_tcp_conn *tcp_conn;
1478 int rc; 1477 int rc, sent = 0;
1479 1478
1480 BUG_ON(!ctask->imm_count); 1479 if (!conn->datadgst_en)
1481 tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA; 1480 return 0;
1482 1481
1483 if (conn->datadgst_en) { 1482 tcp_ctask = ctask->dd_data;
1484 iscsi_data_digest_init(tcp_conn, ctask); 1483 tcp_conn = conn->dd_data;
1485 tcp_ctask->immdigest = 0;
1486 }
1487 1484
1488 for (;;) { 1485 if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
1489 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, 1486 crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
1490 &ctask->imm_count, &tcp_ctask->sent); 1487 iscsi_buf_init_iov(buf, (char*)digest, 4);
1491 if (rc) { 1488 }
1492 tcp_ctask->xmstate |= XMSTATE_IMM_DATA; 1489 tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
1493 if (conn->datadgst_en) {
1494 crypto_digest_final(tcp_conn->data_tx_tfm,
1495 (u8*)&tcp_ctask->immdigest);
1496 debug_tcp("tx imm sendpage fail 0x%x\n",
1497 tcp_ctask->datadigest);
1498 }
1499 return rc;
1500 }
1501 if (conn->datadgst_en)
1502 crypto_digest_update(tcp_conn->data_tx_tfm,
1503 &tcp_ctask->sendbuf.sg, 1);
1504 1490
1505 if (!ctask->imm_count) 1491 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
1506 break; 1492 if (!rc)
1507 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1493 debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
1508 &tcp_ctask->sg[tcp_ctask->sg_count++]); 1494 ctask->itt);
1495 else {
1496 debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
1497 *digest, ctask->itt);
1498 tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
1509 } 1499 }
1500 return rc;
1501}
1510 1502
1511 if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) { 1503static int
1512 rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf, 1504iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
1513 &tcp_ctask->immdigest, 1); 1505 struct scatterlist **sg, int *sent, int *count,
1514 if (rc) { 1506 struct iscsi_buf *digestbuf, uint32_t *digest)
1515 debug_tcp("sending imm digest 0x%x fail!\n", 1507{
1516 tcp_ctask->immdigest); 1508 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1517 return rc; 1509 struct iscsi_conn *conn = ctask->conn;
1510 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1511 int rc, buf_sent, offset;
1512
1513 while (*count) {
1514 buf_sent = 0;
1515 offset = sendbuf->sent;
1516
1517 rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
1518 *sent = *sent + buf_sent;
1519 if (buf_sent && conn->datadgst_en)
1520 partial_sg_digest_update(&tcp_conn->tx_hash,
1521 &sendbuf->sg, sendbuf->sg.offset + offset,
1522 buf_sent);
1523 if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
1524 iscsi_buf_init_sg(sendbuf, *sg);
1525 *sg = *sg + 1;
1518 } 1526 }
1519 debug_tcp("sending imm digest 0x%x\n", tcp_ctask->immdigest); 1527
1528 if (rc)
1529 return rc;
1520 } 1530 }
1521 1531
1522 return 0; 1532 rc = iscsi_send_padding(conn, ctask);
1533 if (rc)
1534 return rc;
1535
1536 return iscsi_send_digest(conn, ctask, digestbuf, digest);
1523} 1537}
1524 1538
1525static inline int 1539static int
1526handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1540iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1527{ 1541{
1528 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1542 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1529 struct iscsi_data_task *dtask; 1543 struct iscsi_data_task *dtask;
@@ -1531,12 +1545,17 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1531 1545
1532 tcp_ctask->xmstate |= XMSTATE_UNS_DATA; 1546 tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
1533 if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) { 1547 if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
1534 iscsi_unsolicit_data_init(conn, ctask); 1548 dtask = &tcp_ctask->unsol_dtask;
1535 dtask = tcp_ctask->dtask; 1549
1550 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
1551 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
1552 sizeof(struct iscsi_hdr));
1536 if (conn->hdrdgst_en) 1553 if (conn->hdrdgst_en)
1537 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1554 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1538 (u8*)dtask->hdrext); 1555 (u8*)dtask->hdrext);
1556
1539 tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT; 1557 tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
1558 iscsi_set_padding(tcp_ctask, ctask->data_count);
1540 } 1559 }
1541 1560
1542 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count); 1561 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
@@ -1546,254 +1565,138 @@ handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1546 return rc; 1565 return rc;
1547 } 1566 }
1548 1567
1568 if (conn->datadgst_en) {
1569 dtask = &tcp_ctask->unsol_dtask;
1570 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
1571 dtask->digest = 0;
1572 }
1573
1549 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n", 1574 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1550 ctask->itt, ctask->unsol_count, tcp_ctask->sent); 1575 ctask->itt, ctask->unsol_count, tcp_ctask->sent);
1551 return 0; 1576 return 0;
1552} 1577}
1553 1578
1554static inline int 1579static int
1555handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1580iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1556{ 1581{
1557 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1582 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1558 struct iscsi_data_task *dtask = tcp_ctask->dtask;
1559 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1560 int rc; 1583 int rc;
1561 1584
1562 BUG_ON(!ctask->data_count); 1585 if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
1563 tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA; 1586 BUG_ON(!ctask->unsol_count);
1564 1587 tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
1565 if (conn->datadgst_en) { 1588send_hdr:
1566 iscsi_data_digest_init(tcp_conn, ctask); 1589 rc = iscsi_send_unsol_hdr(conn, ctask);
1567 dtask->digest = 0; 1590 if (rc)
1591 return rc;
1568 } 1592 }
1569 1593
1570 for (;;) { 1594 if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
1595 struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
1571 int start = tcp_ctask->sent; 1596 int start = tcp_ctask->sent;
1572 1597
1573 rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, 1598 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
1574 &ctask->data_count, &tcp_ctask->sent); 1599 &tcp_ctask->sent, &ctask->data_count,
1575 if (rc) { 1600 &dtask->digestbuf, &dtask->digest);
1576 ctask->unsol_count -= tcp_ctask->sent - start;
1577 tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
1578 /* will continue with this ctask later.. */
1579 if (conn->datadgst_en) {
1580 crypto_digest_final(tcp_conn->data_tx_tfm,
1581 (u8 *)&dtask->digest);
1582 debug_tcp("tx uns data fail 0x%x\n",
1583 dtask->digest);
1584 }
1585 return rc;
1586 }
1587
1588 BUG_ON(tcp_ctask->sent > ctask->total_length);
1589 ctask->unsol_count -= tcp_ctask->sent - start; 1601 ctask->unsol_count -= tcp_ctask->sent - start;
1590 1602 if (rc)
1603 return rc;
1604 tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
1591 /* 1605 /*
1592 * XXX:we may run here with un-initial sendbuf. 1606 * Done with the Data-Out. Next, check if we need
1593 * so pass it 1607 * to send another unsolicited Data-Out.
1594 */ 1608 */
1595 if (conn->datadgst_en && tcp_ctask->sent - start > 0) 1609 if (ctask->unsol_count) {
1596 crypto_digest_update(tcp_conn->data_tx_tfm, 1610 debug_scsi("sending more uns\n");
1597 &tcp_ctask->sendbuf.sg, 1); 1611 tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
1598 1612 goto send_hdr;
1599 if (!ctask->data_count)
1600 break;
1601 iscsi_buf_init_sg(&tcp_ctask->sendbuf,
1602 &tcp_ctask->sg[tcp_ctask->sg_count++]);
1603 }
1604 BUG_ON(ctask->unsol_count < 0);
1605
1606 /*
1607 * Done with the Data-Out. Next, check if we need
1608 * to send another unsolicited Data-Out.
1609 */
1610 if (ctask->unsol_count) {
1611 if (conn->datadgst_en) {
1612 rc = iscsi_digest_final_send(conn, ctask,
1613 &dtask->digestbuf,
1614 &dtask->digest, 1);
1615 if (rc) {
1616 debug_tcp("send uns digest 0x%x fail\n",
1617 dtask->digest);
1618 return rc;
1619 }
1620 debug_tcp("sending uns digest 0x%x, more uns\n",
1621 dtask->digest);
1622 } 1613 }
1623 tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
1624 return 1;
1625 } 1614 }
1626
1627 if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) {
1628 rc = iscsi_digest_final_send(conn, ctask,
1629 &dtask->digestbuf,
1630 &dtask->digest, 1);
1631 if (rc) {
1632 debug_tcp("send last uns digest 0x%x fail\n",
1633 dtask->digest);
1634 return rc;
1635 }
1636 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
1637 }
1638
1639 return 0; 1615 return 0;
1640} 1616}
1641 1617
1642static inline int 1618static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
-handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
-        struct iscsi_session *session = conn->session;
-        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-        struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-        struct iscsi_r2t_info *r2t = tcp_ctask->r2t;
-        struct iscsi_data_task *dtask = &r2t->dtask;
-        int left, rc;
-
-        tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
-        tcp_ctask->dtask = dtask;
-
-        if (conn->datadgst_en) {
-                iscsi_data_digest_init(tcp_conn, ctask);
-                dtask->digest = 0;
-        }
-solicit_again:
-        /*
-         * send Data-Out within this R2T sequence.
-         */
-        if (!r2t->data_count)
-                goto data_out_done;
-
-        rc = iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent);
-        if (rc) {
-                tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
-                /* will continue with this ctask later.. */
-                if (conn->datadgst_en) {
-                        crypto_digest_final(tcp_conn->data_tx_tfm,
-                                            (u8 *)&dtask->digest);
-                        debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
-                }
-                return rc;
-        }
-
-        BUG_ON(r2t->data_count < 0);
-        if (conn->datadgst_en)
-                crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg,
-                                     1);
-
-        if (r2t->data_count) {
-                BUG_ON(ctask->sc->use_sg == 0);
-                if (!iscsi_buf_left(&r2t->sendbuf)) {
-                        BUG_ON(tcp_ctask->bad_sg == r2t->sg);
-                        iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
-                        r2t->sg += 1;
-                }
-                goto solicit_again;
-        }
-
-data_out_done:
-        /*
-         * Done with this Data-Out. Next, check if we have
-         * to send another Data-Out for this R2T.
-         */
-        BUG_ON(r2t->data_length - r2t->sent < 0);
-        left = r2t->data_length - r2t->sent;
-        if (left) {
-                if (conn->datadgst_en) {
-                        rc = iscsi_digest_final_send(conn, ctask,
-                                                     &dtask->digestbuf,
-                                                     &dtask->digest, 1);
-                        if (rc) {
-                                debug_tcp("send r2t data digest 0x%x"
-                                          "fail\n", dtask->digest);
-                                return rc;
-                        }
-                        debug_tcp("r2t data send digest 0x%x\n",
-                                  dtask->digest);
-                }
-                iscsi_solicit_data_cont(conn, ctask, r2t, left);
-                tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
-                tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
-                return 1;
-        }
-
-        /*
-         * Done with this R2T. Check if there are more
-         * outstanding R2Ts ready to be processed.
-         */
-        BUG_ON(tcp_ctask->r2t_data_count - r2t->data_length < 0);
-        if (conn->datadgst_en) {
-                rc = iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
-                                             &dtask->digest, 1);
-                if (rc) {
-                        debug_tcp("send last r2t data digest 0x%x"
-                                  "fail\n", dtask->digest);
-                        return rc;
-                }
-                debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
-        }
-
-        tcp_ctask->r2t_data_count -= r2t->data_length;
-        tcp_ctask->r2t = NULL;
-        spin_lock_bh(&session->lock);
-        __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
-        spin_unlock_bh(&session->lock);
-        if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
-                tcp_ctask->r2t = r2t;
-                tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
-                tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
-                return 1;
-        }
-
-        return 0;
-}
-
-static inline int
-handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
-        struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
-        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-        struct iscsi_data_task *dtask = tcp_ctask->dtask;
-        int sent = 0, rc;
-
-        tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
-        iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
-                           tcp_ctask->pad_count);
-        rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
-                            &sent);
-        if (rc) {
-                tcp_ctask->xmstate |= XMSTATE_W_PAD;
-                return rc;
-        }
-
-        if (conn->datadgst_en) {
-                crypto_digest_update(tcp_conn->data_tx_tfm,
-                                     &tcp_ctask->sendbuf.sg, 1);
-                /* imm data? */
-                if (!dtask) {
-                        rc = iscsi_digest_final_send(conn, ctask,
-                                                     &tcp_ctask->immbuf,
-                                                     &tcp_ctask->immdigest, 1);
-                        if (rc) {
-                                debug_tcp("send padding digest 0x%x"
-                                          "fail!\n", tcp_ctask->immdigest);
-                                return rc;
-                        }
-                        debug_tcp("done with padding, digest 0x%x\n",
-                                  tcp_ctask->datadigest);
-                } else {
-                        rc = iscsi_digest_final_send(conn, ctask,
-                                                     &dtask->digestbuf,
-                                                     &dtask->digest, 1);
-                        if (rc) {
-                                debug_tcp("send padding digest 0x%x"
-                                          "fail\n", dtask->digest);
-                                return rc;
-                        }
-                        debug_tcp("done with padding, digest 0x%x\n",
-                                  dtask->digest);
-                }
-        }
-
-        return 0;
-}
-
+                             struct iscsi_cmd_task *ctask)
+{
+        struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+        struct iscsi_session *session = conn->session;
+        struct iscsi_r2t_info *r2t;
+        struct iscsi_data_task *dtask;
+        int left, rc;
+
+        if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
+                tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
+                tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
+                if (!tcp_ctask->r2t)
+                        __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+                                    sizeof(void*));
+send_hdr:
+                r2t = tcp_ctask->r2t;
+                dtask = &r2t->dtask;
+
+                if (conn->hdrdgst_en)
+                        iscsi_hdr_digest(conn, &r2t->headbuf,
+                                         (u8*)dtask->hdrext);
+                rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
+                if (rc) {
+                        tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
+                        tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
+                        return rc;
+                }
+
+                if (conn->datadgst_en) {
+                        iscsi_data_digest_init(conn->dd_data, tcp_ctask);
+                        dtask->digest = 0;
+                }
+
+                iscsi_set_padding(tcp_ctask, r2t->data_count);
+                debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
+                           r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
+                           r2t->sent);
+        }
+
+        if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
+                r2t = tcp_ctask->r2t;
+                dtask = &r2t->dtask;
+
+                rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
+                                     &r2t->sent, &r2t->data_count,
+                                     &dtask->digestbuf, &dtask->digest);
+                if (rc)
+                        return rc;
+                tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
+
+                /*
+                 * Done with this Data-Out. Next, check if we have
+                 * to send another Data-Out for this R2T.
+                 */
+                BUG_ON(r2t->data_length - r2t->sent < 0);
+                left = r2t->data_length - r2t->sent;
+                if (left) {
+                        iscsi_solicit_data_cont(conn, ctask, r2t, left);
+                        tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
+                        tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
+                        goto send_hdr;
+                }
+
+                /*
+                 * Done with this R2T. Check if there are more
+                 * outstanding R2Ts ready to be processed.
+                 */
+                spin_lock_bh(&session->lock);
+                tcp_ctask->r2t = NULL;
+                __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+                            sizeof(void*));
+                if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
+                                sizeof(void*))) {
+                        tcp_ctask->r2t = r2t;
+                        tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
+                        tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
+                        spin_unlock_bh(&session->lock);
+                        goto send_hdr;
+                }
+                spin_unlock_bh(&session->lock);
+        }
+        return 0;
+}
 
@@ -1813,85 +1716,30 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
         return rc;
 
         if (tcp_ctask->xmstate & XMSTATE_R_HDR)
-                return handle_xmstate_r_hdr(conn, tcp_ctask);
+                return iscsi_send_read_hdr(conn, tcp_ctask);
 
         if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
-                rc = handle_xmstate_w_hdr(conn, ctask);
-                if (rc)
-                        return rc;
-        }
-
-        /* XXX: for data digest xmit recover */
-        if (tcp_ctask->xmstate & XMSTATE_DATA_DIGEST) {
-                rc = handle_xmstate_data_digest(conn, ctask);
+                rc = iscsi_send_write_hdr(conn, ctask);
                 if (rc)
                         return rc;
         }
 
         if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
-                rc = handle_xmstate_imm_data(conn, ctask);
+                rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
+                                     &tcp_ctask->sent, &ctask->imm_count,
+                                     &tcp_ctask->immbuf, &tcp_ctask->immdigest);
                 if (rc)
                         return rc;
+                tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
         }
 
-        if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
-                BUG_ON(!ctask->unsol_count);
-                tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
-unsolicit_head_again:
-                rc = handle_xmstate_uns_hdr(conn, ctask);
-                if (rc)
-                        return rc;
-        }
-
-        if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
-                rc = handle_xmstate_uns_data(conn, ctask);
-                if (rc == 1)
-                        goto unsolicit_head_again;
-                else if (rc)
-                        return rc;
-                goto done;
-        }
-
-        if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
-                struct iscsi_r2t_info *r2t;
-
-                tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
-                tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
-                if (!tcp_ctask->r2t)
-                        __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
-                                    sizeof(void*));
-solicit_head_again:
-                r2t = tcp_ctask->r2t;
-                if (conn->hdrdgst_en)
-                        iscsi_hdr_digest(conn, &r2t->headbuf,
-                                         (u8*)r2t->dtask.hdrext);
-                rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
-                if (rc) {
-                        tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
-                        tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
-                        return rc;
-                }
-
-                debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
-                           r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
-                           r2t->sent);
-        }
-
-        if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
-                rc = handle_xmstate_sol_data(conn, ctask);
-                if (rc == 1)
-                        goto solicit_head_again;
-                if (rc)
-                        return rc;
-        }
+        rc = iscsi_send_unsol_pdu(conn, ctask);
+        if (rc)
+                return rc;
 
-done:
-        /*
-         * Last thing to check is whether we need to send write
-         * padding. Note that we check for xmstate equality, not just the bit.
-         */
-        if (tcp_ctask->xmstate == XMSTATE_W_PAD)
-                rc = handle_xmstate_w_pad(conn, ctask);
+        rc = iscsi_send_sol_pdu(conn, ctask);
+        if (rc)
+                return rc;
 
         return rc;
 }
@@ -1923,8 +1771,24 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
         /* initial operational parameters */
         tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
 
+        tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+                                                  CRYPTO_ALG_ASYNC);
+        tcp_conn->tx_hash.flags = 0;
+        if (!tcp_conn->tx_hash.tfm)
+                goto free_tcp_conn;
+
+        tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+                                                  CRYPTO_ALG_ASYNC);
+        tcp_conn->rx_hash.flags = 0;
+        if (!tcp_conn->rx_hash.tfm)
+                goto free_tx_tfm;
+
         return cls_conn;
 
+free_tx_tfm:
+        crypto_free_hash(tcp_conn->tx_hash.tfm);
+free_tcp_conn:
+        kfree(tcp_conn);
 tcp_conn_alloc_fail:
         iscsi_conn_teardown(cls_conn);
         return NULL;
@@ -1962,14 +1826,10 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 
         /* now free tcp_conn */
         if (digest) {
-                if (tcp_conn->tx_tfm)
-                        crypto_free_tfm(tcp_conn->tx_tfm);
-                if (tcp_conn->rx_tfm)
-                        crypto_free_tfm(tcp_conn->rx_tfm);
-                if (tcp_conn->data_tx_tfm)
-                        crypto_free_tfm(tcp_conn->data_tx_tfm);
-                if (tcp_conn->data_rx_tfm)
-                        crypto_free_tfm(tcp_conn->data_rx_tfm);
+                if (tcp_conn->tx_hash.tfm)
+                        crypto_free_hash(tcp_conn->tx_hash.tfm);
+                if (tcp_conn->rx_hash.tfm)
+                        crypto_free_hash(tcp_conn->rx_hash.tfm);
         }
 
         kfree(tcp_conn);
@@ -1979,9 +1839,11 @@ static void
 iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
         struct iscsi_conn *conn = cls_conn->dd_data;
+        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
 
         iscsi_conn_stop(cls_conn, flag);
         iscsi_tcp_release_conn(conn);
+        tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
 }
 
 static int
@@ -2127,48 +1989,11 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
         case ISCSI_PARAM_HDRDGST_EN:
                 iscsi_set_param(cls_conn, param, buf, buflen);
                 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
-                if (conn->hdrdgst_en) {
+                if (conn->hdrdgst_en)
                         tcp_conn->hdr_size += sizeof(__u32);
-                        if (!tcp_conn->tx_tfm)
-                                tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c",
-                                                                    0);
-                        if (!tcp_conn->tx_tfm)
-                                return -ENOMEM;
-                        if (!tcp_conn->rx_tfm)
-                                tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c",
-                                                                    0);
-                        if (!tcp_conn->rx_tfm) {
-                                crypto_free_tfm(tcp_conn->tx_tfm);
-                                return -ENOMEM;
-                        }
-                } else {
-                        if (tcp_conn->tx_tfm)
-                                crypto_free_tfm(tcp_conn->tx_tfm);
-                        if (tcp_conn->rx_tfm)
-                                crypto_free_tfm(tcp_conn->rx_tfm);
-                }
                 break;
         case ISCSI_PARAM_DATADGST_EN:
                 iscsi_set_param(cls_conn, param, buf, buflen);
-                if (conn->datadgst_en) {
-                        if (!tcp_conn->data_tx_tfm)
-                                tcp_conn->data_tx_tfm =
-                                        crypto_alloc_tfm("crc32c", 0);
-                        if (!tcp_conn->data_tx_tfm)
-                                return -ENOMEM;
-                        if (!tcp_conn->data_rx_tfm)
-                                tcp_conn->data_rx_tfm =
-                                        crypto_alloc_tfm("crc32c", 0);
-                        if (!tcp_conn->data_rx_tfm) {
-                                crypto_free_tfm(tcp_conn->data_tx_tfm);
-                                return -ENOMEM;
-                        }
-                } else {
-                        if (tcp_conn->data_tx_tfm)
-                                crypto_free_tfm(tcp_conn->data_tx_tfm);
-                        if (tcp_conn->data_rx_tfm)
-                                crypto_free_tfm(tcp_conn->data_rx_tfm);
-                }
                 tcp_conn->sendpage = conn->datadgst_en ?
                         sock_no_sendpage : tcp_conn->sock->ops->sendpage;
                 break;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 6a4ee704e46e..32736831790e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -31,26 +31,25 @@
 #define IN_PROGRESS_DDIGEST_RECV        0x3
 
 /* xmit state machine */
 #define XMSTATE_IDLE                    0x0
 #define XMSTATE_R_HDR                   0x1
 #define XMSTATE_W_HDR                   0x2
 #define XMSTATE_IMM_HDR                 0x4
 #define XMSTATE_IMM_DATA                0x8
 #define XMSTATE_UNS_INIT                0x10
 #define XMSTATE_UNS_HDR                 0x20
 #define XMSTATE_UNS_DATA                0x40
 #define XMSTATE_SOL_HDR                 0x80
 #define XMSTATE_SOL_DATA                0x100
 #define XMSTATE_W_PAD                   0x200
-#define XMSTATE_DATA_DIGEST             0x400
+#define XMSTATE_W_RESEND_PAD            0x400
+#define XMSTATE_W_RESEND_DATA_DIGEST    0x800
 
-#define ISCSI_CONN_RCVBUF_MIN           262144
-#define ISCSI_CONN_SNDBUF_MIN           262144
 #define ISCSI_PAD_LEN                   4
-#define ISCSI_R2T_MAX                   16
 #define ISCSI_SG_TABLESIZE              SG_ALL
 #define ISCSI_TCP_MAX_CMD_LEN           16
 
+struct crypto_hash;
 struct socket;
 
 /* Socket connection recieve helper */
@@ -84,9 +83,6 @@ struct iscsi_tcp_conn {
         /* iSCSI connection-wide sequencing */
         int                     hdr_size;       /* PDU header size */
 
-        struct crypto_tfm       *rx_tfm;        /* CRC32C (Rx) */
-        struct crypto_tfm       *data_rx_tfm;   /* CRC32C (Rx) for data */
-
         /* control data */
         struct iscsi_tcp_recv   in;             /* TCP receive context */
         int                     in_progress;    /* connection state machine */
@@ -96,9 +92,9 @@ struct iscsi_tcp_conn {
         void                    (*old_state_change)(struct sock *);
         void                    (*old_write_space)(struct sock *);
 
-        /* xmit */
-        struct crypto_tfm       *tx_tfm;        /* CRC32C (Tx) */
-        struct crypto_tfm       *data_tx_tfm;   /* CRC32C (Tx) for data */
+        /* data and header digests */
+        struct hash_desc        tx_hash;        /* CRC32C (Tx) */
+        struct hash_desc        rx_hash;        /* CRC32C (Rx) */
 
         /* MIB custom statistics */
         uint32_t                sendpage_failures_cnt;
@@ -157,19 +153,15 @@ struct iscsi_tcp_cmd_task {
         struct scatterlist      *bad_sg;        /* assert statement */
         int                     sg_count;       /* SG's to process */
         uint32_t                exp_r2tsn;
-        int                     r2t_data_count; /* R2T Data-Out bytes */
         int                     data_offset;
         struct iscsi_r2t_info   *r2t;           /* in progress R2T */
         struct iscsi_queue      r2tpool;
         struct kfifo            *r2tqueue;
         struct iscsi_r2t_info   **r2ts;
-        uint32_t                datadigest;     /* for recover digest */
         int                     digest_count;
         uint32_t                immdigest;      /* for imm data */
         struct iscsi_buf        immbuf;         /* for imm data digest */
-        struct iscsi_data_task  *dtask;         /* data task in progress*/
         struct iscsi_data_task  unsol_dtask;    /* unsol data task */
-        int                     digest_offset;  /* for partial buff digest */
 };
 
 #endif /* ISCSI_H */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 5884cd26d53a..c542d0e95e68 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -68,8 +68,7 @@ iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
 EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
 
 void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
-                                   struct iscsi_data *hdr,
-                                   int transport_data_cnt)
+                                   struct iscsi_data *hdr)
 {
         struct iscsi_conn *conn = ctask->conn;
 
@@ -82,14 +81,12 @@ void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
 
         hdr->itt = ctask->hdr->itt;
         hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
-
-        hdr->offset = cpu_to_be32(ctask->total_length -
-                                  transport_data_cnt -
-                                  ctask->unsol_count);
+        hdr->offset = cpu_to_be32(ctask->unsol_offset);
 
         if (ctask->unsol_count > conn->max_xmit_dlength) {
                 hton24(hdr->dlength, conn->max_xmit_dlength);
                 ctask->data_count = conn->max_xmit_dlength;
+                ctask->unsol_offset += ctask->data_count;
                 hdr->flags = 0;
         } else {
                 hton24(hdr->dlength, ctask->unsol_count);
@@ -125,6 +122,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
         memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
         memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
 
+        ctask->data_count = 0;
         if (sc->sc_data_direction == DMA_TO_DEVICE) {
                 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
                 /*
@@ -143,6 +141,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
                  */
                 ctask->imm_count = 0;
                 ctask->unsol_count = 0;
+                ctask->unsol_offset = 0;
                 ctask->unsol_datasn = 0;
 
                 if (session->imm_data_en) {
@@ -156,9 +155,12 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
         } else
                 zero_data(ctask->hdr->dlength);
 
-        if (!session->initial_r2t_en)
+        if (!session->initial_r2t_en) {
                 ctask->unsol_count = min(session->first_burst,
                                          ctask->total_length) - ctask->imm_count;
+                ctask->unsol_offset = ctask->imm_count;
+        }
+
         if (!ctask->unsol_count)
                 /* No unsolicit Data-Out's */
                 ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
@@ -177,25 +179,51 @@ EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
 
 /**
  * iscsi_complete_command - return command back to scsi-ml
- * @session: iscsi session
  * @ctask: iscsi cmd task
  *
  * Must be called with session lock.
  * This function returns the scsi command to scsi-ml and returns
  * the cmd task to the pool of available cmd tasks.
  */
-static void iscsi_complete_command(struct iscsi_session *session,
-                                   struct iscsi_cmd_task *ctask)
+static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
 {
+        struct iscsi_session *session = ctask->conn->session;
         struct scsi_cmnd *sc = ctask->sc;
 
         ctask->state = ISCSI_TASK_COMPLETED;
         ctask->sc = NULL;
+        /* SCSI eh reuses commands to verify us */
+        sc->SCp.ptr = NULL;
         list_del_init(&ctask->running);
         __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
         sc->scsi_done(sc);
 }
 
+static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+{
+        atomic_inc(&ctask->refcount);
+}
+
+static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+{
+        spin_lock_bh(&ctask->conn->session->lock);
+        __iscsi_get_ctask(ctask);
+        spin_unlock_bh(&ctask->conn->session->lock);
+}
+
+static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+{
+        if (atomic_dec_and_test(&ctask->refcount))
+                iscsi_complete_command(ctask);
+}
+
+static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+{
+        spin_lock_bh(&ctask->conn->session->lock);
+        __iscsi_put_ctask(ctask);
+        spin_unlock_bh(&ctask->conn->session->lock);
+}
+
 /**
  * iscsi_cmd_rsp - SCSI Command Response processing
  * @conn: iscsi connection
@@ -272,7 +300,7 @@ out:
                   (long)sc, sc->result, ctask->itt);
         conn->scsirsp_pdus_cnt++;
 
-        iscsi_complete_command(conn->session, ctask);
+        __iscsi_put_ctask(ctask);
         return rc;
 }
 
@@ -295,6 +323,30 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
         wake_up(&conn->ehwait);
 }
 
+static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+                               char *data, int datalen)
+{
+        struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
+        struct iscsi_hdr rejected_pdu;
+        uint32_t itt;
+
+        conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
+
+        if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
+                if (ntoh24(reject->dlength) > datalen)
+                        return ISCSI_ERR_PROTO;
+
+                if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
+                        memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+                        itt = rejected_pdu.itt & ISCSI_ITT_MASK;
+                        printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
+                                "due to DataDigest error.\n", itt,
+                                rejected_pdu.opcode);
+                }
+        }
+        return 0;
+}
+
 /**
  * __iscsi_complete_pdu - complete pdu
  * @conn: iscsi conn
@@ -336,7 +388,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
                 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
                         conn->scsirsp_pdus_cnt++;
-                        iscsi_complete_command(session, ctask);
+                        __iscsi_put_ctask(ctask);
                 }
                 break;
         case ISCSI_OP_R2T:
@@ -406,6 +458,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                         break;
                 }
         } else if (itt == ISCSI_RESERVED_TAG) {
+                rc = iscsi_check_assign_cmdsn(session,
+                                              (struct iscsi_nopin*)hdr);
+                if (rc)
+                        goto done;
+
                 switch(opcode) {
                 case ISCSI_OP_NOOP_IN:
                         if (datalen) {
@@ -413,11 +470,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                                 break;
                         }
 
-                        rc = iscsi_check_assign_cmdsn(session,
-                                                (struct iscsi_nopin*)hdr);
-                        if (rc)
-                                break;
-
                         if (hdr->ttt == ISCSI_RESERVED_TAG)
                                 break;
 
@@ -425,7 +477,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                                 rc = ISCSI_ERR_CONN_FAILED;
                         break;
                 case ISCSI_OP_REJECT:
-                        /* we need sth like iscsi_reject_rsp()*/
+                        rc = iscsi_handle_reject(conn, hdr, data, datalen);
+                        break;
                 case ISCSI_OP_ASYNC_EVENT:
                         conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
                         /* we need sth like iscsi_async_event_rsp() */
@@ -561,7 +614,9 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
         BUG_ON(conn->ctask && conn->mtask);
 
         if (conn->ctask) {
+                iscsi_get_ctask(conn->ctask);
                 rc = tt->xmit_cmd_task(conn, conn->ctask);
+                iscsi_put_ctask(conn->ctask);
                 if (rc)
                         goto again;
                 /* done with this in-progress ctask */
@@ -602,12 +657,19 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
                                    struct iscsi_cmd_task, running);
                 conn->ctask->state = ISCSI_TASK_RUNNING;
                 list_move_tail(conn->xmitqueue.next, &conn->run_list);
+                __iscsi_get_ctask(conn->ctask);
                 spin_unlock_bh(&conn->session->lock);
 
                 rc = tt->xmit_cmd_task(conn, conn->ctask);
                 if (rc)
                         goto again;
+
                 spin_lock_bh(&conn->session->lock);
+                __iscsi_put_ctask(conn->ctask);
+                if (rc) {
+                        spin_unlock_bh(&conn->session->lock);
+                        goto again;
+                }
         }
         spin_unlock_bh(&conn->session->lock);
         /* done with this ctask */
@@ -657,6 +719,7 @@ enum {
         FAILURE_SESSION_FAILED,
         FAILURE_SESSION_FREED,
         FAILURE_WINDOW_CLOSED,
+        FAILURE_OOM,
         FAILURE_SESSION_TERMINATE,
         FAILURE_SESSION_IN_RECOVERY,
         FAILURE_SESSION_RECOVERY_TIMEOUT,
@@ -672,6 +735,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
         sc->scsi_done = done;
         sc->result = 0;
+        sc->SCp.ptr = NULL;
 
         host = sc->device->host;
         session = iscsi_hostdata(host->hostdata);
@@ -715,10 +779,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
         conn = session->leadconn;
 
-        __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
+        if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+                         sizeof(void*))) {
+                reason = FAILURE_OOM;
+                goto reject;
+        }
         sc->SCp.phase = session->age;
         sc->SCp.ptr = (char *)ctask;
 
+        atomic_set(&ctask->refcount, 1);
         ctask->state = ISCSI_TASK_PENDING;
         ctask->mtask = NULL;
         ctask->conn = conn;
@@ -731,9 +800,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
         list_add_tail(&ctask->running, &conn->xmitqueue);
         debug_scsi(
-               "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
+               "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
+               "win %d]\n",
                 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
-                conn->id, (long)sc, ctask->itt, sc->request_bufflen,
+                conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
                 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
         spin_unlock(&session->lock);
 
@@ -1061,16 +1131,30 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
 
         sc->result = err;
         sc->resid = sc->request_bufflen;
-        iscsi_complete_command(conn->session, ctask);
+        /* release ref from queuecommand */
+        __iscsi_put_ctask(ctask);
 }
 
 int iscsi_eh_abort(struct scsi_cmnd *sc)
 {
-        struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
-        struct iscsi_conn *conn = ctask->conn;
-        struct iscsi_session *session = conn->session;
+        struct iscsi_cmd_task *ctask;
+        struct iscsi_conn *conn;
+        struct iscsi_session *session;
         int rc;
 
+        /*
+         * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+         * got the command.
+         */
+        if (!sc->SCp.ptr) {
+                debug_scsi("sc never reached iscsi layer or it completed.\n");
+                return SUCCESS;
+        }
+
+        ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
+        conn = ctask->conn;
+        session = conn->session;
+
         conn->eh_abort_cnt++;
         debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
 
@@ -1520,11 +1604,19 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
         struct iscsi_conn *conn = cls_conn->dd_data;
         struct iscsi_session *session = conn->session;
 
-        if (session == NULL) {
+        if (!session) {
                 printk(KERN_ERR "iscsi: can't start unbound connection\n");
                 return -EPERM;
         }
 
+        if ((session->imm_data_en || !session->initial_r2t_en) &&
+             session->first_burst > session->max_burst) {
+                printk("iscsi: invalid burst lengths: "
+                       "first_burst %d max_burst %d\n",
+                       session->first_burst, session->max_burst);
+                return -EINVAL;
+        }
+
         spin_lock_bh(&session->lock);
         conn->c_stage = ISCSI_CONN_STARTED;
         session->state = ISCSI_STATE_LOGGED_IN;
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
new file mode 100644
index 000000000000..aafdc92f8312
--- /dev/null
+++ b/drivers/scsi/libsas/Kconfig
@@ -0,0 +1,39 @@
1#
2# Kernel configuration file for the SAS Class
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23#
24
25config SCSI_SAS_LIBSAS
26 tristate "SAS Domain Transport Attributes"
27 depends on SCSI
28 select SCSI_SAS_ATTRS
29 help
30 This provides transport specific helpers for SAS drivers which
31 use the domain device construct (like the aic94xxx).
32
33config SCSI_SAS_LIBSAS_DEBUG
34 bool "Compile the SAS Domain Transport Attributes in debug mode"
35 default y
36 depends on SCSI_SAS_LIBSAS
37 help
38 Compiles the SAS Layer in debug mode. In debug mode, the
39 SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
new file mode 100644
index 000000000000..44d972a3b4bd
--- /dev/null
+++ b/drivers/scsi/libsas/Makefile
@@ -0,0 +1,36 @@
1#
2# Kernel Makefile for the libsas helpers
3#
4# Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6#
7# This file is licensed under GPLv2.
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public License as
11# published by the Free Software Foundation; version 2 of the
12# License.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17# General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program; if not, write to the Free Software
21# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22# USA
23
24ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
25 EXTRA_CFLAGS += -DSAS_DEBUG
26endif
27
28obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
29libsas-y += sas_init.o \
30 sas_phy.o \
31 sas_port.o \
32 sas_event.o \
33 sas_dump.o \
34 sas_discover.o \
35 sas_expander.o \
36 sas_scsi_host.o
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
new file mode 100644
index 000000000000..d977bd492d8d
--- /dev/null
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -0,0 +1,749 @@
1/*
2 * Serial Attached SCSI (SAS) Discover process
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <linux/pci.h>
26#include <linux/scatterlist.h>
27#include <scsi/scsi_host.h>
28#include <scsi/scsi_eh.h>
29#include "sas_internal.h"
30
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_transport_sas.h>
33#include "../scsi_sas_internal.h"
34
35/* ---------- Basic task processing for discovery purposes ---------- */
36
37void sas_init_dev(struct domain_device *dev)
38{
39 INIT_LIST_HEAD(&dev->siblings);
40 INIT_LIST_HEAD(&dev->dev_list_node);
41 switch (dev->dev_type) {
42 case SAS_END_DEV:
43 break;
44 case EDGE_DEV:
45 case FANOUT_DEV:
46 INIT_LIST_HEAD(&dev->ex_dev.children);
47 break;
48 case SATA_DEV:
49 case SATA_PM:
50 case SATA_PM_PORT:
51 INIT_LIST_HEAD(&dev->sata_dev.children);
52 break;
53 default:
54 break;
55 }
56}
57
58static void sas_task_timedout(unsigned long _task)
59{
60 struct sas_task *task = (void *) _task;
61 unsigned long flags;
62
63 spin_lock_irqsave(&task->task_state_lock, flags);
64 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
65 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
66 spin_unlock_irqrestore(&task->task_state_lock, flags);
67
68 complete(&task->completion);
69}
70
71static void sas_disc_task_done(struct sas_task *task)
72{
73 if (!del_timer(&task->timer))
74 return;
75 complete(&task->completion);
76}
77
78#define SAS_DEV_TIMEOUT 10
79
80/**
81 * sas_execute_task -- Basic task processing for discovery
82 * @task: the task to be executed
83 * @buffer: pointer to buffer to do I/O
84 * @size: size of @buffer
85 * @pci_dma_dir: PCI_DMA_...
86 */
87static int sas_execute_task(struct sas_task *task, void *buffer, int size,
88 int pci_dma_dir)
89{
90 int res = 0;
91 struct scatterlist *scatter = NULL;
92 struct task_status_struct *ts = &task->task_status;
93 int num_scatter = 0;
94 int retries = 0;
95 struct sas_internal *i =
96 to_sas_internal(task->dev->port->ha->core.shost->transportt);
97
98 if (pci_dma_dir != PCI_DMA_NONE) {
99 scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
100 if (!scatter)
101 goto out;
102
103 sg_init_one(scatter, buffer, size);
104 num_scatter = 1;
105 }
106
107 task->task_proto = task->dev->tproto;
108 task->scatter = scatter;
109 task->num_scatter = num_scatter;
110 task->total_xfer_len = size;
111 task->data_dir = pci_dma_dir;
112 task->task_done = sas_disc_task_done;
113
114 for (retries = 0; retries < 5; retries++) {
115 task->task_state_flags = SAS_TASK_STATE_PENDING;
116 init_completion(&task->completion);
117
118 task->timer.data = (unsigned long) task;
119 task->timer.function = sas_task_timedout;
120 task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
121 add_timer(&task->timer);
122
123 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
124 if (res) {
125 del_timer(&task->timer);
126 SAS_DPRINTK("executing SAS discovery task failed:%d\n",
127 res);
128 goto ex_err;
129 }
130 wait_for_completion(&task->completion);
131 res = -ETASK;
132 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
133 int res2;
134 SAS_DPRINTK("task aborted, flags:0x%x\n",
135 task->task_state_flags);
136 res2 = i->dft->lldd_abort_task(task);
137 SAS_DPRINTK("came back from abort task\n");
138 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
139 if (res2 == TMF_RESP_FUNC_COMPLETE)
140 continue; /* Retry the task */
141 else
142 goto ex_err;
143 }
144 }
145 if (task->task_status.stat == SAM_BUSY ||
146 task->task_status.stat == SAM_TASK_SET_FULL ||
147 task->task_status.stat == SAS_QUEUE_FULL) {
148 SAS_DPRINTK("task: q busy, sleeping...\n");
149 schedule_timeout_interruptible(HZ);
150 } else if (task->task_status.stat == SAM_CHECK_COND) {
151 struct scsi_sense_hdr shdr;
152
153 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
154 &shdr)) {
155 SAS_DPRINTK("couldn't normalize sense\n");
156 continue;
157 }
158 if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
159 (shdr.sense_key == 2 && shdr.asc == 4 &&
160 shdr.ascq == 1)) {
161 SAS_DPRINTK("device %016llx LUN: %016llx "
162 "powering up or not ready yet, "
163 "sleeping...\n",
164 SAS_ADDR(task->dev->sas_addr),
165 SAS_ADDR(task->ssp_task.LUN));
166
167 schedule_timeout_interruptible(5*HZ);
168 } else if (shdr.sense_key == 1) {
169 res = 0;
170 break;
171 } else if (shdr.sense_key == 5) {
172 break;
173 } else {
174 SAS_DPRINTK("dev %016llx LUN: %016llx "
175 "sense key:0x%x ASC:0x%x ASCQ:0x%x"
176 "\n",
177 SAS_ADDR(task->dev->sas_addr),
178 SAS_ADDR(task->ssp_task.LUN),
179 shdr.sense_key,
180 shdr.asc, shdr.ascq);
181 }
182 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
183 task->task_status.stat != SAM_GOOD) {
184 SAS_DPRINTK("task finished with resp:0x%x, "
185 "stat:0x%x\n",
186 task->task_status.resp,
187 task->task_status.stat);
188 goto ex_err;
189 } else {
190 res = 0;
191 break;
192 }
193 }
194ex_err:
195 if (pci_dma_dir != PCI_DMA_NONE)
196 kfree(scatter);
197out:
198 return res;
199}
200
201/* ---------- Domain device discovery ---------- */
202
203/**
204 * sas_get_port_device -- Discover devices which caused port creation
205 * @port: pointer to struct sas_port of interest
206 *
207 * Devices directly attached to a HA port, have no parent. This is
208 * how we know they are (domain) "root" devices. All other devices
209 * do, and should have their "parent" pointer set appropriately as
210 * soon as a child device is discovered.
211 */
212static int sas_get_port_device(struct asd_sas_port *port)
213{
214 unsigned long flags;
215 struct asd_sas_phy *phy;
216 struct sas_rphy *rphy;
217 struct domain_device *dev;
218
219 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
220 if (!dev)
221 return -ENOMEM;
222
223 spin_lock_irqsave(&port->phy_list_lock, flags);
224 if (list_empty(&port->phy_list)) {
225 spin_unlock_irqrestore(&port->phy_list_lock, flags);
226 kfree(dev);
227 return -ENODEV;
228 }
229 phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
230 spin_lock(&phy->frame_rcvd_lock);
231 memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
232 (size_t)phy->frame_rcvd_size));
233 spin_unlock(&phy->frame_rcvd_lock);
234 spin_unlock_irqrestore(&port->phy_list_lock, flags);
235
236 if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
237 struct dev_to_host_fis *fis =
238 (struct dev_to_host_fis *) dev->frame_rcvd;
239 if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
240 fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
241 && (fis->device & ~0x10) == 0)
242 dev->dev_type = SATA_PM;
243 else
244 dev->dev_type = SATA_DEV;
245 dev->tproto = SATA_PROTO;
246 } else {
247 struct sas_identify_frame *id =
248 (struct sas_identify_frame *) dev->frame_rcvd;
249 dev->dev_type = id->dev_type;
250 dev->iproto = id->initiator_bits;
251 dev->tproto = id->target_bits;
252 }
253
254 sas_init_dev(dev);
255
256 switch (dev->dev_type) {
257 case SAS_END_DEV:
258 rphy = sas_end_device_alloc(port->port);
259 break;
260 case EDGE_DEV:
261 rphy = sas_expander_alloc(port->port,
262 SAS_EDGE_EXPANDER_DEVICE);
263 break;
264 case FANOUT_DEV:
265 rphy = sas_expander_alloc(port->port,
266 SAS_FANOUT_EXPANDER_DEVICE);
267 break;
268 case SATA_DEV:
269 default:
270 printk("ERROR: Unidentified device type %d\n", dev->dev_type);
271 rphy = NULL;
272 break;
273 }
274
275 if (!rphy) {
276 kfree(dev);
277 return -ENODEV;
278 }
279 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
280 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
281 sas_fill_in_rphy(dev, rphy);
282 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
283 port->port_dev = dev;
284 dev->port = port;
285 dev->linkrate = port->linkrate;
286 dev->min_linkrate = port->linkrate;
287 dev->max_linkrate = port->linkrate;
288 dev->pathways = port->num_phys;
289 memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
290 memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
291 memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
292 port->disc.max_level = 0;
293
294 dev->rphy = rphy;
295 spin_lock(&port->dev_list_lock);
296 list_add_tail(&dev->dev_list_node, &port->dev_list);
297 spin_unlock(&port->dev_list_lock);
298
299 return 0;
300}
301
302/* ---------- Discover and Revalidate ---------- */
303
304/* ---------- SATA ---------- */
305
306static void sas_get_ata_command_set(struct domain_device *dev)
307{
308 struct dev_to_host_fis *fis =
309 (struct dev_to_host_fis *) dev->frame_rcvd;
310
311 if ((fis->sector_count == 1 && /* ATA */
312 fis->lbal == 1 &&
313 fis->lbam == 0 &&
314 fis->lbah == 0 &&
315 fis->device == 0)
316 ||
317 (fis->sector_count == 0 && /* CE-ATA (mATA) */
318 fis->lbal == 0 &&
319 fis->lbam == 0xCE &&
320 fis->lbah == 0xAA &&
321 (fis->device & ~0x10) == 0))
322
323 dev->sata_dev.command_set = ATA_COMMAND_SET;
324
325 else if ((fis->interrupt_reason == 1 && /* ATAPI */
326 fis->lbal == 1 &&
327 fis->byte_count_low == 0x14 &&
328 fis->byte_count_high == 0xEB &&
329 (fis->device & ~0x10) == 0))
330
331 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
332
333 else if ((fis->sector_count == 1 && /* SEMB */
334 fis->lbal == 1 &&
335 fis->lbam == 0x3C &&
336 fis->lbah == 0xC3 &&
337 fis->device == 0)
338 ||
339 (fis->interrupt_reason == 1 && /* SATA PM */
340 fis->lbal == 1 &&
341 fis->byte_count_low == 0x69 &&
342 fis->byte_count_high == 0x96 &&
343 (fis->device & ~0x10) == 0))
344
345 /* Treat it as a superset? */
346 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
347}
348
349/**
350 * sas_issue_ata_cmd -- Basic SATA command processing for discovery
351 * @dev: the device to send the command to
352 * @command: the command register
353 * @features: the features register
354 * @buffer: pointer to buffer to do I/O
355 * @size: size of @buffer
356 * @pci_dma_dir: PCI_DMA_...
357 */
358static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
359 u8 features, void *buffer, int size,
360 int pci_dma_dir)
361{
362 int res = 0;
363 struct sas_task *task;
364 struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
365 &dev->frame_rcvd[0];
366
367 res = -ENOMEM;
368 task = sas_alloc_task(GFP_KERNEL);
369 if (!task)
370 goto out;
371
372 task->dev = dev;
373
374 task->ata_task.fis.command = command;
375 task->ata_task.fis.features = features;
376 task->ata_task.fis.device = d2h_fis->device;
377 task->ata_task.retry_count = 1;
378
379 res = sas_execute_task(task, buffer, size, pci_dma_dir);
380
381 sas_free_task(task);
382out:
383 return res;
384}
385
386static void sas_sata_propagate_sas_addr(struct domain_device *dev)
387{
388 unsigned long flags;
389 struct asd_sas_port *port = dev->port;
390 struct asd_sas_phy *phy;
391
392 BUG_ON(dev->parent);
393
394 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
395 spin_lock_irqsave(&port->phy_list_lock, flags);
396 list_for_each_entry(phy, &port->phy_list, port_phy_el)
397 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
398 spin_unlock_irqrestore(&port->phy_list_lock, flags);
399}
400
401#define ATA_IDENTIFY_DEV 0xEC
402#define ATA_IDENTIFY_PACKET_DEV 0xA1
403#define ATA_SET_FEATURES 0xEF
404#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
405
406/**
407 * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
408 * @dev: STP/SATA device of interest (ATA/ATAPI)
409 *
410 * The LLDD has already been notified of this device, so that we can
411 * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
412 * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
413 * performance for this device.
414 */
415static int sas_discover_sata_dev(struct domain_device *dev)
416{
417 int res;
418 __le16 *identify_x;
419 u8 command;
420
421 identify_x = kzalloc(512, GFP_KERNEL);
422 if (!identify_x)
423 return -ENOMEM;
424
425 if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
426 dev->sata_dev.identify_device = identify_x;
427 command = ATA_IDENTIFY_DEV;
428 } else {
429 dev->sata_dev.identify_packet_device = identify_x;
430 command = ATA_IDENTIFY_PACKET_DEV;
431 }
432
433 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
434 PCI_DMA_FROMDEVICE);
435 if (res)
436 goto out_err;
437
438 /* lives on the media? */
439 if (le16_to_cpu(identify_x[0]) & 4) {
440 /* incomplete response */
441 SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
442 "dev %llx\n", SAS_ADDR(dev->sas_addr));
443 if (!le16_to_cpu(identify_x[83] & (1<<6)))
444 goto cont1;
445 res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
446 ATA_FEATURE_PUP_STBY_SPIN_UP,
447 NULL, 0, PCI_DMA_NONE);
448 if (res)
449 goto cont1;
450
451 schedule_timeout_interruptible(5*HZ); /* More time? */
452 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
453 PCI_DMA_FROMDEVICE);
454 if (res)
455 goto out_err;
456 }
457cont1:
458 /* Get WWN */
459 if (dev->port->oob_mode != SATA_OOB_MODE) {
460 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
461 SAS_ADDR_SIZE);
462 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
463 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
464 == 0x5000) {
465 int i;
466
467 for (i = 0; i < 4; i++) {
468 dev->sas_addr[2*i] =
469 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
470 dev->sas_addr[2*i+1] =
471 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
472 }
473 }
474 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
475 if (!dev->parent)
476 sas_sata_propagate_sas_addr(dev);
477
478 /* XXX Hint: register this SATA device with SATL.
479 When this returns, dev->sata_dev->lu is alive and
480 present.
481 sas_satl_register_dev(dev);
482 */
483 return 0;
484out_err:
485 dev->sata_dev.identify_packet_device = NULL;
486 dev->sata_dev.identify_device = NULL;
487 kfree(identify_x);
488 return res;
489}
490
491static int sas_discover_sata_pm(struct domain_device *dev)
492{
493 return -ENODEV;
494}
495
496int sas_notify_lldd_dev_found(struct domain_device *dev)
497{
498 int res = 0;
499 struct sas_ha_struct *sas_ha = dev->port->ha;
500 struct Scsi_Host *shost = sas_ha->core.shost;
501 struct sas_internal *i = to_sas_internal(shost->transportt);
502
503 if (i->dft->lldd_dev_found) {
504 res = i->dft->lldd_dev_found(dev);
505 if (res) {
506 printk("sas: driver on pcidev %s cannot handle "
507 "device %llx, error:%d\n",
508 pci_name(sas_ha->pcidev),
509 SAS_ADDR(dev->sas_addr), res);
510 }
511 }
512 return res;
513}
514
515
516void sas_notify_lldd_dev_gone(struct domain_device *dev)
517{
518 struct sas_ha_struct *sas_ha = dev->port->ha;
519 struct Scsi_Host *shost = sas_ha->core.shost;
520 struct sas_internal *i = to_sas_internal(shost->transportt);
521
522 if (i->dft->lldd_dev_gone)
523 i->dft->lldd_dev_gone(dev);
524}
525
526/* ---------- Common/dispatchers ---------- */
527
528/**
529 * sas_discover_sata -- discover an STP/SATA domain device
530 * @dev: pointer to struct domain_device of interest
531 *
532 * First we notify the LLDD of this device, so we can send frames to
533 * it. Then depending on the type of device we call the appropriate
534 * discover functions. Once device discover is done, we notify the
535 * LLDD so that it can fine-tune its parameters for the device, by
536 * removing it and then adding it. That is, the second time around,
537 * the driver would have certain fields, that it is looking at, set.
538 * Finally we initialize the kobj so that the device can be added to
539 * the system at registration time. Devices directly attached to a HA
540 * port, have no parents. All other devices do, and should have their
541 * "parent" pointer set appropriately before calling this function.
542 */
543int sas_discover_sata(struct domain_device *dev)
544{
545 int res;
546
547 sas_get_ata_command_set(dev);
548
549 res = sas_notify_lldd_dev_found(dev);
550 if (res)
551 return res;
552
553 switch (dev->dev_type) {
554 case SATA_DEV:
555 res = sas_discover_sata_dev(dev);
556 break;
557 case SATA_PM:
558 res = sas_discover_sata_pm(dev);
559 break;
560 default:
561 break;
562 }
563
564 sas_notify_lldd_dev_gone(dev);
565 if (!res) {
566 sas_notify_lldd_dev_found(dev);
567 }
568 return res;
569}
570
571/**
572 * sas_discover_end_dev -- discover an end device (SSP, etc)
573 * @end: pointer to domain device of interest
574 *
575 * See comment in sas_discover_sata().
576 */
577int sas_discover_end_dev(struct domain_device *dev)
578{
579 int res;
580
581 res = sas_notify_lldd_dev_found(dev);
582 if (res)
583 return res;
584
585 res = sas_rphy_add(dev->rphy);
586 if (res)
587 goto out_err;
588
589 /* do this to get the end device port attributes which will have
590 * been scanned in sas_rphy_add */
591 sas_notify_lldd_dev_gone(dev);
592 sas_notify_lldd_dev_found(dev);
593
594 return 0;
595
596out_err:
597 sas_notify_lldd_dev_gone(dev);
598 return res;
599}
600
601/* ---------- Device registration and unregistration ---------- */
602
603static inline void sas_unregister_common_dev(struct domain_device *dev)
604{
605 sas_notify_lldd_dev_gone(dev);
606 if (!dev->parent)
607 dev->port->port_dev = NULL;
608 else
609 list_del_init(&dev->siblings);
610 list_del_init(&dev->dev_list_node);
611}
612
613void sas_unregister_dev(struct domain_device *dev)
614{
615 if (dev->rphy) {
616 sas_remove_children(&dev->rphy->dev);
617 sas_rphy_delete(dev->rphy);
618 dev->rphy = NULL;
619 }
620 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
621 /* remove the phys and ports, everything else should be gone */
622 kfree(dev->ex_dev.ex_phy);
623 dev->ex_dev.ex_phy = NULL;
624 }
625 sas_unregister_common_dev(dev);
626}
627
628void sas_unregister_domain_devices(struct asd_sas_port *port)
629{
630 struct domain_device *dev, *n;
631
632 list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node)
633 sas_unregister_dev(dev);
634
635 port->port->rphy = NULL;
636
637}
638
639/* ---------- Discovery and Revalidation ---------- */
640
641/**
642 * sas_discover_domain -- discover the domain
643 * @port: port to the domain of interest
644 *
645 * NOTE: this process _must_ quit (return) as soon as any connection
646 * errors are encountered. Connection recovery is done elsewhere.
647 * Discover process only interrogates devices in order to discover the
648 * domain.
649 */
650static void sas_discover_domain(void *data)
651{
652 int error = 0;
653 struct asd_sas_port *port = data;
654
655 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
656 &port->disc.pending);
657
658 if (port->port_dev)
659 return ;
660 else {
661 error = sas_get_port_device(port);
662 if (error)
663 return;
664 }
665
666 SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id,
667 current->pid);
668
669 switch (port->port_dev->dev_type) {
670 case SAS_END_DEV:
671 error = sas_discover_end_dev(port->port_dev);
672 break;
673 case EDGE_DEV:
674 case FANOUT_DEV:
675 error = sas_discover_root_expander(port->port_dev);
676 break;
677 case SATA_DEV:
678 case SATA_PM:
679 error = sas_discover_sata(port->port_dev);
680 break;
681 default:
682 SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type);
683 break;
684 }
685
686 if (error) {
687 kfree(port->port_dev); /* not kobject_register-ed yet */
688 port->port_dev = NULL;
689 }
690
691 SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
692 current->pid, error);
693}
694
695static void sas_revalidate_domain(void *data)
696{
697 int res = 0;
698 struct asd_sas_port *port = data;
699
700 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
701 &port->disc.pending);
702
703 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
704 current->pid);
705 if (port->port_dev)
706 res = sas_ex_revalidate_domain(port->port_dev);
707
708 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
709 port->id, current->pid, res);
710}
711
712/* ---------- Events ---------- */
713
714int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
715{
716 struct sas_discovery *disc;
717
718 if (!port)
719 return 0;
720 disc = &port->disc;
721
722 BUG_ON(ev >= DISC_NUM_EVENTS);
723
724 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
725 &disc->disc_work[ev], port->ha->core.shost);
726
727 return 0;
728}
729
730/**
731 * sas_init_disc -- initialize the discovery struct in the port
732 * @port: pointer to struct port
733 *
734 * Called when the ports are being initialized.
735 */
736void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
737{
738 int i;
739
740 static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
741 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
742 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
743 };
744
745 spin_lock_init(&disc->disc_event_lock);
746 disc->pending = 0;
747 for (i = 0; i < DISC_NUM_EVENTS; i++)
748 INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
749}
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
new file mode 100644
index 000000000000..f1246d2c9bef
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -0,0 +1,76 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_dump.h"
26
27#ifdef SAS_DEBUG
28
29static const char *sas_hae_str[] = {
30 [0] = "HAE_RESET",
31};
32
33static const char *sas_porte_str[] = {
34 [0] = "PORTE_BYTES_DMAED",
35 [1] = "PORTE_BROADCAST_RCVD",
36 [2] = "PORTE_LINK_RESET_ERR",
37 [3] = "PORTE_TIMER_EVENT",
38 [4] = "PORTE_HARD_RESET",
39};
40
41static const char *sas_phye_str[] = {
42 [0] = "PHYE_LOSS_OF_SIGNAL",
43 [1] = "PHYE_OOB_DONE",
44 [2] = "PHYE_OOB_ERROR",
45 [3] = "PHYE_SPINUP_HOLD",
46};
47
48void sas_dprint_porte(int phyid, enum port_event pe)
49{
50 SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]);
51}
52void sas_dprint_phye(int phyid, enum phy_event pe)
53{
54 SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
55}
56
57void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
58{
59 SAS_DPRINTK("ha %s: %s event\n", pci_name(sas_ha->pcidev),
60 sas_hae_str[he]);
61}
62
63void sas_dump_port(struct asd_sas_port *port)
64{
65 SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
66 SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id,
67 SAS_ADDR(port->sas_addr));
68 SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id,
69 SAS_ADDR(port->attached_sas_addr));
70 SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto);
71 SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto);
72 SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
73 SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
74}
75
76#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
new file mode 100644
index 000000000000..47b45d4f5258
--- /dev/null
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -0,0 +1,42 @@
1/*
2 * Serial Attached SCSI (SAS) Dump/Debugging routines header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27#ifdef SAS_DEBUG
28
29void sas_dprint_porte(int phyid, enum port_event pe);
30void sas_dprint_phye(int phyid, enum phy_event pe);
31void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
32void sas_dump_port(struct asd_sas_port *port);
33
34#else /* SAS_DEBUG */
35
36static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
37static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
38static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
39 enum ha_event he) { }
40static inline void sas_dump_port(struct asd_sas_port *port) { }
41
42#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
new file mode 100644
index 000000000000..19110ed1c89c
--- /dev/null
+++ b/drivers/scsi/libsas/sas_event.c
@@ -0,0 +1,75 @@
1/*
2 * Serial Attached SCSI (SAS) Event processing
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <scsi/scsi_host.h>
26#include "sas_internal.h"
27#include "sas_dump.h"
28
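/*
 * LLDDs report asynchronous events through the notify_* hooks installed
 * below.  Each hook validates the event number and hands the event's
 * pending bitmap, lock and work item to sas_queue_event() (defined
 * elsewhere in libsas), which schedules the work on the Scsi_Host so the
 * event is handled later in process context.
 */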
29static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
30{
31 BUG_ON(event >= HA_NUM_EVENTS);
32
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
34 &sas_ha->ha_events[event], sas_ha->core.shost);
35}
36
37static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
38{
39 struct sas_ha_struct *ha = phy->ha;
40
41 BUG_ON(event >= PORT_NUM_EVENTS);
42
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
44 &phy->port_events[event], ha->core.shost);
45}
46
47static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
48{
49 struct sas_ha_struct *ha = phy->ha;
50
51 BUG_ON(event >= PHY_NUM_EVENTS);
52
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
54 &phy->phy_events[event], ha->core.shost);
55}
56
57int sas_init_events(struct sas_ha_struct *sas_ha)
58{
59 static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
60 [HAE_RESET] = sas_hae_reset,
61 };
62
63 int i;
64
65 spin_lock_init(&sas_ha->event_lock);
66
67 for (i = 0; i < HA_NUM_EVENTS; i++)
68 INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
69
70 sas_ha->notify_ha_event = notify_ha_event;
71 sas_ha->notify_port_event = notify_port_event;
72 sas_ha->notify_phy_event = notify_phy_event;
73
74 return 0;
75}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
new file mode 100644
index 000000000000..30b8014bcc7a
--- /dev/null
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -0,0 +1,1855 @@
1/*
2 * Serial Attached SCSI (SAS) Expander discovery and configuration
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include <linux/pci.h>
26#include <linux/scatterlist.h>
27
28#include "sas_internal.h"
29
30#include <scsi/scsi_transport.h>
31#include <scsi/scsi_transport_sas.h>
32#include "../scsi_sas_internal.h"
33
34static int sas_discover_expander(struct domain_device *dev);
35static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
36static int sas_configure_phy(struct domain_device *dev, int phy_id,
37 u8 *sas_addr, int include);
38static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
39
40#if 0
41/* FIXME: smp needs to migrate into the sas class */
42static ssize_t smp_portal_read(struct kobject *, char *, loff_t, size_t);
43static ssize_t smp_portal_write(struct kobject *, char *, loff_t, size_t);
44#endif
45
46/* ---------- SMP task management ---------- */
47
48static void smp_task_timedout(unsigned long _task)
49{
50 struct sas_task *task = (void *) _task;
51 unsigned long flags;
52
53 spin_lock_irqsave(&task->task_state_lock, flags);
54 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
55 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
56 spin_unlock_irqrestore(&task->task_state_lock, flags);
57
58 complete(&task->completion);
59}
60
61static void smp_task_done(struct sas_task *task)
62{
63 if (!del_timer(&task->timer))
64 return;
65 complete(&task->completion);
66}
67
68/* Give the SMP request a generously long timeout, in seconds. */
69#define SMP_TIMEOUT 10
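/*
 * smp_execute_task() sends a single SMP request synchronously: it arms a
 * timer for SMP_TIMEOUT seconds, hands the task to the LLDD via
 * lldd_execute_task(), and sleeps on task->completion, which is signalled
 * either by smp_task_done() on normal completion or by smp_task_timedout()
 * when the timer fires; timed-out or aborted tasks are then cancelled via
 * lldd_abort_task().
 */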
70
71static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
72 void *resp, int resp_size)
73{
74 int res;
75 struct sas_task *task = sas_alloc_task(GFP_KERNEL);
76 struct sas_internal *i =
77 to_sas_internal(dev->port->ha->core.shost->transportt);
78
79 if (!task)
80 return -ENOMEM;
81
82 task->dev = dev;
83 task->task_proto = dev->tproto;
84 sg_init_one(&task->smp_task.smp_req, req, req_size);
85 sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
86
87 task->task_done = smp_task_done;
88
89 task->timer.data = (unsigned long) task;
90 task->timer.function = smp_task_timedout;
91 task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
92 add_timer(&task->timer);
93
94 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
95
96 if (res) {
97 del_timer(&task->timer);
98 SAS_DPRINTK("executing SMP task failed:%d\n", res);
99 goto ex_err;
100 }
101
102 wait_for_completion(&task->completion);
103 res = -ETASK;
104 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
105 SAS_DPRINTK("smp task timed out or aborted\n");
106 i->dft->lldd_abort_task(task);
107 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
108 SAS_DPRINTK("SMP task aborted and not done\n");
109 goto ex_err;
110 }
111 }
112 if (task->task_status.resp == SAS_TASK_COMPLETE &&
113 task->task_status.stat == SAM_GOOD)
114 res = 0;
115 else
116 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
117 "status 0x%x\n", __FUNCTION__,
118 SAS_ADDR(dev->sas_addr),
119 task->task_status.resp,
120 task->task_status.stat);
121ex_err:
122 sas_free_task(task);
123 return res;
124}
125
126/* ---------- Allocations ---------- */
127
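/*
 * SMP frames share a common layout: byte 0 is the frame type (SMP_REQUEST
 * for requests), byte 1 the SMP function code, and function-specific
 * fields follow (e.g. the phy identifier at byte 9 of DISCOVER).
 * alloc_smp_req() zeroes the buffer and pre-sets byte 0, so callers only
 * fill in the fields that differ per function; alloc_smp_resp() simply
 * returns a zeroed buffer for the reply.
 */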
128static inline void *alloc_smp_req(int size)
129{
130 u8 *p = kzalloc(size, GFP_KERNEL);
131 if (p)
132 p[0] = SMP_REQUEST;
133 return p;
134}
135
136static inline void *alloc_smp_resp(int size)
137{
138 return kzalloc(size, GFP_KERNEL);
139}
140
141/* ---------- Expander configuration ---------- */
142
143static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
144 void *disc_resp)
145{
146 struct expander_device *ex = &dev->ex_dev;
147 struct ex_phy *phy = &ex->ex_phy[phy_id];
148 struct smp_resp *resp = disc_resp;
149 struct discover_resp *dr = &resp->disc;
150 struct sas_rphy *rphy = dev->rphy;
151 int rediscover = (phy->phy != NULL);
152
153 if (!rediscover) {
154 phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
155
156 /* FIXME: error_handling */
157 BUG_ON(!phy->phy);
158 }
159
160 switch (resp->result) {
161 case SMP_RESP_PHY_VACANT:
162 phy->phy_state = PHY_VACANT;
163 return;
164 default:
165 phy->phy_state = PHY_NOT_PRESENT;
166 return;
167 case SMP_RESP_FUNC_ACC:
168 phy->phy_state = PHY_EMPTY; /* do not know yet */
169 break;
170 }
171
172 phy->phy_id = phy_id;
173 phy->attached_dev_type = dr->attached_dev_type;
174 phy->linkrate = dr->linkrate;
175 phy->attached_sata_host = dr->attached_sata_host;
176 phy->attached_sata_dev = dr->attached_sata_dev;
177 phy->attached_sata_ps = dr->attached_sata_ps;
178 phy->attached_iproto = dr->iproto << 1;
179 phy->attached_tproto = dr->tproto << 1;
180 memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
181 phy->attached_phy_id = dr->attached_phy_id;
182 phy->phy_change_count = dr->change_count;
183 phy->routing_attr = dr->routing_attr;
184 phy->virtual = dr->virtual;
185 phy->last_da_index = -1;
186
187 phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
188 phy->phy->identify.target_port_protocols = phy->attached_tproto;
189 phy->phy->identify.phy_identifier = phy_id;
190 phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
191 phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
192 phy->phy->minimum_linkrate = dr->pmin_linkrate;
193 phy->phy->maximum_linkrate = dr->pmax_linkrate;
194 phy->phy->negotiated_linkrate = phy->linkrate;
195
196 if (!rediscover)
197 sas_phy_add(phy->phy);
198
199 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n",
200 SAS_ADDR(dev->sas_addr), phy->phy_id,
201 phy->routing_attr == TABLE_ROUTING ? 'T' :
202 phy->routing_attr == DIRECT_ROUTING ? 'D' :
203 phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?',
204 SAS_ADDR(phy->attached_sas_addr));
205
206 return;
207}
208
209#define DISCOVER_REQ_SIZE 16
210#define DISCOVER_RESP_SIZE 56
211
212static int sas_ex_phy_discover(struct domain_device *dev, int single)
213{
214 struct expander_device *ex = &dev->ex_dev;
215 int res = 0;
216 u8 *disc_req;
217 u8 *disc_resp;
218
219 disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
220 if (!disc_req)
221 return -ENOMEM;
222
223	disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
224 if (!disc_resp) {
225 kfree(disc_req);
226 return -ENOMEM;
227 }
228
229 disc_req[1] = SMP_DISCOVER;
230
231 if (0 <= single && single < ex->num_phys) {
232 disc_req[9] = single;
233 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
234 disc_resp, DISCOVER_RESP_SIZE);
235 if (res)
236 goto out_err;
237 sas_set_ex_phy(dev, single, disc_resp);
238 } else {
239 int i;
240
241 for (i = 0; i < ex->num_phys; i++) {
242 disc_req[9] = i;
243 res = smp_execute_task(dev, disc_req,
244 DISCOVER_REQ_SIZE, disc_resp,
245 DISCOVER_RESP_SIZE);
246 if (res)
247 goto out_err;
248 sas_set_ex_phy(dev, i, disc_resp);
249 }
250 }
251out_err:
252 kfree(disc_resp);
253 kfree(disc_req);
254 return res;
255}
256
257static int sas_expander_discover(struct domain_device *dev)
258{
259 struct expander_device *ex = &dev->ex_dev;
260 int res = -ENOMEM;
261
262 ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL);
263 if (!ex->ex_phy)
264 return -ENOMEM;
265
266 res = sas_ex_phy_discover(dev, -1);
267 if (res)
268 goto out_err;
269
270 return 0;
271 out_err:
272 kfree(ex->ex_phy);
273 ex->ex_phy = NULL;
274 return res;
275}
276
277#define MAX_EXPANDER_PHYS 128
278
279static void ex_assign_report_general(struct domain_device *dev,
280 struct smp_resp *resp)
281{
282 struct report_general_resp *rg = &resp->rg;
283
284 dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
285 dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
286 dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
287 dev->ex_dev.conf_route_table = rg->conf_route_table;
288 dev->ex_dev.configuring = rg->configuring;
289 memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
290}
291
292#define RG_REQ_SIZE 8
293#define RG_RESP_SIZE 32
294
295static int sas_ex_general(struct domain_device *dev)
296{
297 u8 *rg_req;
298 struct smp_resp *rg_resp;
299 int res;
300 int i;
301
302 rg_req = alloc_smp_req(RG_REQ_SIZE);
303 if (!rg_req)
304 return -ENOMEM;
305
306 rg_resp = alloc_smp_resp(RG_RESP_SIZE);
307 if (!rg_resp) {
308 kfree(rg_req);
309 return -ENOMEM;
310 }
311
312 rg_req[1] = SMP_REPORT_GENERAL;
313
314 for (i = 0; i < 5; i++) {
315 res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
316 RG_RESP_SIZE);
317
318 if (res) {
319 SAS_DPRINTK("RG to ex %016llx failed:0x%x\n",
320 SAS_ADDR(dev->sas_addr), res);
321 goto out;
322 } else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
323 SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n",
324 SAS_ADDR(dev->sas_addr), rg_resp->result);
325 res = rg_resp->result;
326 goto out;
327 }
328
329 ex_assign_report_general(dev, rg_resp);
330
331 if (dev->ex_dev.configuring) {
332 SAS_DPRINTK("RG: ex %llx self-configuring...\n",
333 SAS_ADDR(dev->sas_addr));
334 schedule_timeout_interruptible(5*HZ);
335 } else
336 break;
337 }
338out:
339 kfree(rg_req);
340 kfree(rg_resp);
341 return res;
342}
343
344static void ex_assign_manuf_info(struct domain_device *dev, void
345 *_mi_resp)
346{
347 u8 *mi_resp = _mi_resp;
348 struct sas_rphy *rphy = dev->rphy;
349 struct sas_expander_device *edev = rphy_to_expander_device(rphy);
350
351 memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN);
352 memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN);
353 memcpy(edev->product_rev, mi_resp + 36,
354 SAS_EXPANDER_PRODUCT_REV_LEN);
355
356 if (mi_resp[8] & 1) {
357 memcpy(edev->component_vendor_id, mi_resp + 40,
358 SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
359 edev->component_id = mi_resp[48] << 8 | mi_resp[49];
360 edev->component_revision_id = mi_resp[50];
361 }
362}
363
364#define MI_REQ_SIZE 8
365#define MI_RESP_SIZE 64
366
367static int sas_ex_manuf_info(struct domain_device *dev)
368{
369 u8 *mi_req;
370 u8 *mi_resp;
371 int res;
372
373 mi_req = alloc_smp_req(MI_REQ_SIZE);
374 if (!mi_req)
375 return -ENOMEM;
376
377 mi_resp = alloc_smp_resp(MI_RESP_SIZE);
378 if (!mi_resp) {
379 kfree(mi_req);
380 return -ENOMEM;
381 }
382
383 mi_req[1] = SMP_REPORT_MANUF_INFO;
384
385 res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
386 if (res) {
387 SAS_DPRINTK("MI: ex %016llx failed:0x%x\n",
388 SAS_ADDR(dev->sas_addr), res);
389 goto out;
390 } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
391 SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n",
392 SAS_ADDR(dev->sas_addr), mi_resp[2]);
393 goto out;
394 }
395
396 ex_assign_manuf_info(dev, mi_resp);
397out:
398 kfree(mi_req);
399 kfree(mi_resp);
400 return res;
401}
402
403#define PC_REQ_SIZE 44
404#define PC_RESP_SIZE 8
405
406int sas_smp_phy_control(struct domain_device *dev, int phy_id,
407 enum phy_func phy_func,
408 struct sas_phy_linkrates *rates)
409{
410 u8 *pc_req;
411 u8 *pc_resp;
412 int res;
413
414 pc_req = alloc_smp_req(PC_REQ_SIZE);
415 if (!pc_req)
416 return -ENOMEM;
417
418 pc_resp = alloc_smp_resp(PC_RESP_SIZE);
419 if (!pc_resp) {
420 kfree(pc_req);
421 return -ENOMEM;
422 }
423
424 pc_req[1] = SMP_PHY_CONTROL;
425 pc_req[9] = phy_id;
426	pc_req[10] = phy_func;
427 if (rates) {
428 pc_req[32] = rates->minimum_linkrate << 4;
429 pc_req[33] = rates->maximum_linkrate << 4;
430 }
431
432 res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
433
434 kfree(pc_resp);
435 kfree(pc_req);
436 return res;
437}
438
439static void sas_ex_disable_phy(struct domain_device *dev, int phy_id)
440{
441 struct expander_device *ex = &dev->ex_dev;
442 struct ex_phy *phy = &ex->ex_phy[phy_id];
443
444 sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL);
445 phy->linkrate = SAS_PHY_DISABLED;
446}
447
448static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr)
449{
450 struct expander_device *ex = &dev->ex_dev;
451 int i;
452
453 for (i = 0; i < ex->num_phys; i++) {
454 struct ex_phy *phy = &ex->ex_phy[i];
455
456 if (phy->phy_state == PHY_VACANT ||
457 phy->phy_state == PHY_NOT_PRESENT)
458 continue;
459
460 if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr))
461 sas_ex_disable_phy(dev, i);
462 }
463}
464
465static int sas_dev_present_in_domain(struct asd_sas_port *port,
466 u8 *sas_addr)
467{
468 struct domain_device *dev;
469
470 if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr))
471 return 1;
472 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
473 if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr))
474 return 1;
475 }
476 return 0;
477}
478
479#define RPEL_REQ_SIZE 16
480#define RPEL_RESP_SIZE 32
481int sas_smp_get_phy_events(struct sas_phy *phy)
482{
483 int res;
484 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
485 struct domain_device *dev = sas_find_dev_by_rphy(rphy);
486 u8 *req = alloc_smp_req(RPEL_REQ_SIZE);
487 u8 *resp = kzalloc(RPEL_RESP_SIZE, GFP_KERNEL);
488
489 if (!resp)
490 return -ENOMEM;
491
492 req[1] = SMP_REPORT_PHY_ERR_LOG;
493 req[9] = phy->number;
494
495 res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
496 resp, RPEL_RESP_SIZE);
497
498	if (res)
499 goto out;
500
501 phy->invalid_dword_count = scsi_to_u32(&resp[12]);
502 phy->running_disparity_error_count = scsi_to_u32(&resp[16]);
503 phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]);
504 phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
505
506 out:
507 kfree(resp);
508 return res;
509
510}
511
512#define RPS_REQ_SIZE 16
513#define RPS_RESP_SIZE 60
514
515static int sas_get_report_phy_sata(struct domain_device *dev,
516 int phy_id,
517 struct smp_resp *rps_resp)
518{
519 int res;
520 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
521
522 if (!rps_req)
523 return -ENOMEM;
524
525 rps_req[1] = SMP_REPORT_PHY_SATA;
526 rps_req[9] = phy_id;
527
528 res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
529 rps_resp, RPS_RESP_SIZE);
530
531 kfree(rps_req);
532	return res;
533}
534
535static void sas_ex_get_linkrate(struct domain_device *parent,
536 struct domain_device *child,
537 struct ex_phy *parent_phy)
538{
539 struct expander_device *parent_ex = &parent->ex_dev;
540 struct sas_port *port;
541 int i;
542
543 child->pathways = 0;
544
545 port = parent_phy->port;
546
547 for (i = 0; i < parent_ex->num_phys; i++) {
548 struct ex_phy *phy = &parent_ex->ex_phy[i];
549
550 if (phy->phy_state == PHY_VACANT ||
551 phy->phy_state == PHY_NOT_PRESENT)
552 continue;
553
554 if (SAS_ADDR(phy->attached_sas_addr) ==
555 SAS_ADDR(child->sas_addr)) {
556
557 child->min_linkrate = min(parent->min_linkrate,
558 phy->linkrate);
559 child->max_linkrate = max(parent->max_linkrate,
560 phy->linkrate);
561 child->pathways++;
562 sas_port_add_phy(port, phy->phy);
563 }
564 }
565 child->linkrate = min(parent_phy->linkrate, child->max_linkrate);
566 child->pathways = min(child->pathways, parent->pathways);
567}
568
569static struct domain_device *sas_ex_discover_end_dev(
570 struct domain_device *parent, int phy_id)
571{
572 struct expander_device *parent_ex = &parent->ex_dev;
573 struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
574 struct domain_device *child = NULL;
575 struct sas_rphy *rphy;
576 int res;
577
578 if (phy->attached_sata_host || phy->attached_sata_ps)
579 return NULL;
580
581 child = kzalloc(sizeof(*child), GFP_KERNEL);
582 if (!child)
583 return NULL;
584
585 child->parent = parent;
586 child->port = parent->port;
587 child->iproto = phy->attached_iproto;
588 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
589 sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
590 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
591 BUG_ON(!phy->port);
592 /* FIXME: better error handling*/
593 BUG_ON(sas_port_add(phy->port) != 0);
594 sas_ex_get_linkrate(parent, child, phy);
595
596 if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
597 child->dev_type = SATA_DEV;
598 if (phy->attached_tproto & SAS_PROTO_STP)
599 child->tproto = phy->attached_tproto;
600 if (phy->attached_sata_dev)
601 child->tproto |= SATA_DEV;
602 res = sas_get_report_phy_sata(parent, phy_id,
603 &child->sata_dev.rps_resp);
604 if (res) {
605 SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
606 "0x%x\n", SAS_ADDR(parent->sas_addr),
607 phy_id, res);
608 kfree(child);
609 return NULL;
610 }
611 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
612 sizeof(struct dev_to_host_fis));
613 sas_init_dev(child);
614 res = sas_discover_sata(child);
615 if (res) {
616 SAS_DPRINTK("sas_discover_sata() for device %16llx at "
617 "%016llx:0x%x returned 0x%x\n",
618 SAS_ADDR(child->sas_addr),
619 SAS_ADDR(parent->sas_addr), phy_id, res);
620 kfree(child);
621 return NULL;
622 }
623 } else if (phy->attached_tproto & SAS_PROTO_SSP) {
624 child->dev_type = SAS_END_DEV;
625 rphy = sas_end_device_alloc(phy->port);
626 /* FIXME: error handling */
627 BUG_ON(!rphy);
628 child->tproto = phy->attached_tproto;
629 sas_init_dev(child);
630
631 child->rphy = rphy;
632 sas_fill_in_rphy(child, rphy);
633
634 spin_lock(&parent->port->dev_list_lock);
635 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
636 spin_unlock(&parent->port->dev_list_lock);
637
638 res = sas_discover_end_dev(child);
639 if (res) {
640 SAS_DPRINTK("sas_discover_end_dev() for device %16llx "
641 "at %016llx:0x%x returned 0x%x\n",
642 SAS_ADDR(child->sas_addr),
643 SAS_ADDR(parent->sas_addr), phy_id, res);
644 /* FIXME: this kfrees list elements without removing them */
645 //kfree(child);
646 return NULL;
647 }
648 } else {
649 SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
650 phy->attached_tproto, SAS_ADDR(parent->sas_addr),
651 phy_id);
652 }
653
654 list_add_tail(&child->siblings, &parent_ex->children);
655 return child;
656}
657
658static struct domain_device *sas_ex_discover_expander(
659 struct domain_device *parent, int phy_id)
660{
661 struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy);
662 struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
663 struct domain_device *child = NULL;
664 struct sas_rphy *rphy;
665 struct sas_expander_device *edev;
666 struct asd_sas_port *port;
667 int res;
668
669 if (phy->routing_attr == DIRECT_ROUTING) {
670 SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not "
671 "allowed\n",
672 SAS_ADDR(parent->sas_addr), phy_id,
673 SAS_ADDR(phy->attached_sas_addr),
674 phy->attached_phy_id);
675 return NULL;
676 }
677 child = kzalloc(sizeof(*child), GFP_KERNEL);
678 if (!child)
679 return NULL;
680
681 phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
682 /* FIXME: better error handling */
683 BUG_ON(sas_port_add(phy->port) != 0);
684
685
686 switch (phy->attached_dev_type) {
687 case EDGE_DEV:
688 rphy = sas_expander_alloc(phy->port,
689 SAS_EDGE_EXPANDER_DEVICE);
690 break;
691 case FANOUT_DEV:
692 rphy = sas_expander_alloc(phy->port,
693 SAS_FANOUT_EXPANDER_DEVICE);
694 break;
695 default:
696 rphy = NULL; /* shut gcc up */
697 BUG();
698 }
699 port = parent->port;
700 child->rphy = rphy;
701 edev = rphy_to_expander_device(rphy);
702 child->dev_type = phy->attached_dev_type;
703 child->parent = parent;
704 child->port = port;
705 child->iproto = phy->attached_iproto;
706 child->tproto = phy->attached_tproto;
707 memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
708 sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
709 sas_ex_get_linkrate(parent, child, phy);
710 edev->level = parent_ex->level + 1;
711 parent->port->disc.max_level = max(parent->port->disc.max_level,
712 edev->level);
713 sas_init_dev(child);
714 sas_fill_in_rphy(child, rphy);
715 sas_rphy_add(rphy);
716
717 spin_lock(&parent->port->dev_list_lock);
718 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
719 spin_unlock(&parent->port->dev_list_lock);
720
721 res = sas_discover_expander(child);
722 if (res) {
723 kfree(child);
724 return NULL;
725 }
726 list_add_tail(&child->siblings, &parent->ex_dev.children);
727 return child;
728}
729
730static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
731{
732 struct expander_device *ex = &dev->ex_dev;
733 struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
734 struct domain_device *child = NULL;
735 int res = 0;
736
737 /* Phy state */
738 if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) {
739 if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL))
740 res = sas_ex_phy_discover(dev, phy_id);
741 if (res)
742 return res;
743 }
744
745 /* Parent and domain coherency */
746 if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
747 SAS_ADDR(dev->port->sas_addr))) {
748 sas_add_parent_port(dev, phy_id);
749 return 0;
750 }
751 if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) ==
752 SAS_ADDR(dev->parent->sas_addr))) {
753 sas_add_parent_port(dev, phy_id);
754 if (ex_phy->routing_attr == TABLE_ROUTING)
755 sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1);
756 return 0;
757 }
758
759 if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
760 sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
761
762 if (ex_phy->attached_dev_type == NO_DEVICE) {
763 if (ex_phy->routing_attr == DIRECT_ROUTING) {
764 memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
765 sas_configure_routing(dev, ex_phy->attached_sas_addr);
766 }
767 return 0;
768 } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
769 return 0;
770
771 if (ex_phy->attached_dev_type != SAS_END_DEV &&
772 ex_phy->attached_dev_type != FANOUT_DEV &&
773 ex_phy->attached_dev_type != EDGE_DEV) {
774 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
775 "phy 0x%x\n", ex_phy->attached_dev_type,
776 SAS_ADDR(dev->sas_addr),
777 phy_id);
778 return 0;
779 }
780
781 res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
782 if (res) {
783 SAS_DPRINTK("configure routing for dev %016llx "
784 "reported 0x%x. Forgotten\n",
785 SAS_ADDR(ex_phy->attached_sas_addr), res);
786 sas_disable_routing(dev, ex_phy->attached_sas_addr);
787 return res;
788 }
789
790 switch (ex_phy->attached_dev_type) {
791 case SAS_END_DEV:
792 child = sas_ex_discover_end_dev(dev, phy_id);
793 break;
794 case FANOUT_DEV:
795 if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
796 SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
797 "attached to ex %016llx phy 0x%x\n",
798 SAS_ADDR(ex_phy->attached_sas_addr),
799 ex_phy->attached_phy_id,
800 SAS_ADDR(dev->sas_addr),
801 phy_id);
802 sas_ex_disable_phy(dev, phy_id);
803 break;
804 } else
805 memcpy(dev->port->disc.fanout_sas_addr,
806 ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
807 /* fallthrough */
808 case EDGE_DEV:
809 child = sas_ex_discover_expander(dev, phy_id);
810 break;
811 default:
812 break;
813 }
814
815 if (child) {
816 int i;
817
818 for (i = 0; i < ex->num_phys; i++) {
819 if (ex->ex_phy[i].phy_state == PHY_VACANT ||
820 ex->ex_phy[i].phy_state == PHY_NOT_PRESENT)
821 continue;
822
823 if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
824 SAS_ADDR(child->sas_addr))
825				ex->ex_phy[i].phy_state = PHY_DEVICE_DISCOVERED;
826 }
827 }
828
829 return res;
830}
831
832static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
833{
834 struct expander_device *ex = &dev->ex_dev;
835 int i;
836
837 for (i = 0; i < ex->num_phys; i++) {
838 struct ex_phy *phy = &ex->ex_phy[i];
839
840 if (phy->phy_state == PHY_VACANT ||
841 phy->phy_state == PHY_NOT_PRESENT)
842 continue;
843
844 if ((phy->attached_dev_type == EDGE_DEV ||
845 phy->attached_dev_type == FANOUT_DEV) &&
846 phy->routing_attr == SUBTRACTIVE_ROUTING) {
847
848 memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
849
850 return 1;
851 }
852 }
853 return 0;
854}
855
856static int sas_check_level_subtractive_boundary(struct domain_device *dev)
857{
858 struct expander_device *ex = &dev->ex_dev;
859 struct domain_device *child;
860 u8 sub_addr[8] = {0, };
861
862 list_for_each_entry(child, &ex->children, siblings) {
863 if (child->dev_type != EDGE_DEV &&
864 child->dev_type != FANOUT_DEV)
865 continue;
866 if (sub_addr[0] == 0) {
867 sas_find_sub_addr(child, sub_addr);
868 continue;
869 } else {
870 u8 s2[8];
871
872 if (sas_find_sub_addr(child, s2) &&
873 (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
874
875 SAS_DPRINTK("ex %016llx->%016llx-?->%016llx "
876 "diverges from subtractive "
877 "boundary %016llx\n",
878 SAS_ADDR(dev->sas_addr),
879 SAS_ADDR(child->sas_addr),
880 SAS_ADDR(s2),
881 SAS_ADDR(sub_addr));
882
883 sas_ex_disable_port(child, s2);
884 }
885 }
886 }
887 return 0;
888}
889/**
890 * sas_ex_discover_devices -- discover devices attached to this expander
891 * @dev: pointer to the expander domain device
892 * @single: phy number to discover, or -1 to discover all phys
893 *
894 * Configure this expander for use with its devices and register the
895 * devices of this expander.
896 */
897static int sas_ex_discover_devices(struct domain_device *dev, int single)
898{
899 struct expander_device *ex = &dev->ex_dev;
900 int i = 0, end = ex->num_phys;
901 int res = 0;
902
903 if (0 <= single && single < end) {
904 i = single;
905 end = i+1;
906 }
907
908 for ( ; i < end; i++) {
909 struct ex_phy *ex_phy = &ex->ex_phy[i];
910
911 if (ex_phy->phy_state == PHY_VACANT ||
912 ex_phy->phy_state == PHY_NOT_PRESENT ||
913 ex_phy->phy_state == PHY_DEVICE_DISCOVERED)
914 continue;
915
916 switch (ex_phy->linkrate) {
917 case SAS_PHY_DISABLED:
918 case SAS_PHY_RESET_PROBLEM:
919 case SAS_SATA_PORT_SELECTOR:
920 continue;
921 default:
922 res = sas_ex_discover_dev(dev, i);
923 if (res)
924 break;
925 continue;
926 }
927 }
928
929 if (!res)
930 sas_check_level_subtractive_boundary(dev);
931
932 return res;
933}
934
935static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
936{
937 struct expander_device *ex = &dev->ex_dev;
938 int i;
939 u8 *sub_sas_addr = NULL;
940
941 if (dev->dev_type != EDGE_DEV)
942 return 0;
943
944 for (i = 0; i < ex->num_phys; i++) {
945 struct ex_phy *phy = &ex->ex_phy[i];
946
947 if (phy->phy_state == PHY_VACANT ||
948 phy->phy_state == PHY_NOT_PRESENT)
949 continue;
950
951 if ((phy->attached_dev_type == FANOUT_DEV ||
952 phy->attached_dev_type == EDGE_DEV) &&
953 phy->routing_attr == SUBTRACTIVE_ROUTING) {
954
955 if (!sub_sas_addr)
956 sub_sas_addr = &phy->attached_sas_addr[0];
957 else if (SAS_ADDR(sub_sas_addr) !=
958 SAS_ADDR(phy->attached_sas_addr)) {
959
960 SAS_DPRINTK("ex %016llx phy 0x%x "
961 "diverges(%016llx) on subtractive "
962 "boundary(%016llx). Disabled\n",
963 SAS_ADDR(dev->sas_addr), i,
964 SAS_ADDR(phy->attached_sas_addr),
965 SAS_ADDR(sub_sas_addr));
966 sas_ex_disable_phy(dev, i);
967 }
968 }
969 }
970 return 0;
971}
972
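/*
 * Expander phys carry one of three routing attributes: Direct (D) for
 * phys attached directly to end devices, Subtractive (S) for the default
 * path an edge expander uses for addresses it has no explicit route for,
 * and Table (T) for phys whose reachable addresses are programmed into
 * the expander's route table.  The checks below report parent/child phy
 * pairs whose attribute combination violates the SAS topology rules.
 */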
973static void sas_print_parent_topology_bug(struct domain_device *child,
974 struct ex_phy *parent_phy,
975 struct ex_phy *child_phy)
976{
977 static const char ra_char[] = {
978 [DIRECT_ROUTING] = 'D',
979 [SUBTRACTIVE_ROUTING] = 'S',
980 [TABLE_ROUTING] = 'T',
981 };
982 static const char *ex_type[] = {
983 [EDGE_DEV] = "edge",
984 [FANOUT_DEV] = "fanout",
985 };
986 struct domain_device *parent = child->parent;
987
988 sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x "
989 "has %c:%c routing link!\n",
990
991 ex_type[parent->dev_type],
992 SAS_ADDR(parent->sas_addr),
993 parent_phy->phy_id,
994
995 ex_type[child->dev_type],
996 SAS_ADDR(child->sas_addr),
997 child_phy->phy_id,
998
999 ra_char[parent_phy->routing_attr],
1000 ra_char[child_phy->routing_attr]);
1001}
1002
1003static int sas_check_eeds(struct domain_device *child,
1004 struct ex_phy *parent_phy,
1005 struct ex_phy *child_phy)
1006{
1007 int res = 0;
1008 struct domain_device *parent = child->parent;
1009
1010 if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
1011 res = -ENODEV;
1012 SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx "
1013 "phy S:0x%x, while there is a fanout ex %016llx\n",
1014 SAS_ADDR(parent->sas_addr),
1015 parent_phy->phy_id,
1016 SAS_ADDR(child->sas_addr),
1017 child_phy->phy_id,
1018 SAS_ADDR(parent->port->disc.fanout_sas_addr));
1019 } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
1020 memcpy(parent->port->disc.eeds_a, parent->sas_addr,
1021 SAS_ADDR_SIZE);
1022 memcpy(parent->port->disc.eeds_b, child->sas_addr,
1023 SAS_ADDR_SIZE);
1024 } else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
1025 SAS_ADDR(parent->sas_addr)) ||
1026 (SAS_ADDR(parent->port->disc.eeds_a) ==
1027 SAS_ADDR(child->sas_addr)))
1028 &&
1029 ((SAS_ADDR(parent->port->disc.eeds_b) ==
1030 SAS_ADDR(parent->sas_addr)) ||
1031 (SAS_ADDR(parent->port->disc.eeds_b) ==
1032 SAS_ADDR(child->sas_addr))))
1033 ;
1034 else {
1035 res = -ENODEV;
1036 SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx "
1037 "phy 0x%x link forms a third EEDS!\n",
1038 SAS_ADDR(parent->sas_addr),
1039 parent_phy->phy_id,
1040 SAS_ADDR(child->sas_addr),
1041 child_phy->phy_id);
1042 }
1043
1044 return res;
1045}
1046
1047/* Here we spill over 80 columns. It is intentional.
1048 */
1049static int sas_check_parent_topology(struct domain_device *child)
1050{
1051 struct expander_device *child_ex = &child->ex_dev;
1052 struct expander_device *parent_ex;
1053 int i;
1054 int res = 0;
1055
1056 if (!child->parent)
1057 return 0;
1058
1059 if (child->parent->dev_type != EDGE_DEV &&
1060 child->parent->dev_type != FANOUT_DEV)
1061 return 0;
1062
1063 parent_ex = &child->parent->ex_dev;
1064
1065 for (i = 0; i < parent_ex->num_phys; i++) {
1066 struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
1067 struct ex_phy *child_phy;
1068
1069 if (parent_phy->phy_state == PHY_VACANT ||
1070 parent_phy->phy_state == PHY_NOT_PRESENT)
1071 continue;
1072
1073 if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr))
1074 continue;
1075
1076 child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
1077
1078 switch (child->parent->dev_type) {
1079 case EDGE_DEV:
1080 if (child->dev_type == FANOUT_DEV) {
1081 if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
1082 child_phy->routing_attr != TABLE_ROUTING) {
1083 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1084 res = -ENODEV;
1085 }
1086 } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
1087 if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
1088 res = sas_check_eeds(child, parent_phy, child_phy);
1089 } else if (child_phy->routing_attr != TABLE_ROUTING) {
1090 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1091 res = -ENODEV;
1092 }
1093 } else if (parent_phy->routing_attr == TABLE_ROUTING &&
1094 child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
1095 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1096 res = -ENODEV;
1097 }
1098 break;
1099 case FANOUT_DEV:
1100 if (parent_phy->routing_attr != TABLE_ROUTING ||
1101 child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
1102 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1103 res = -ENODEV;
1104 }
1105 break;
1106 default:
1107 break;
1108 }
1109 }
1110
1111 return res;
1112}
1113
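/*
 * Route table programming: sas_configure_present() walks the parent's
 * route index entries for a phy using SMP REPORT ROUTE INFORMATION,
 * looking either for an entry that already carries the target SAS address
 * or for a free (disabled/empty) slot, and sas_configure_set() then writes
 * that slot with SMP CONFIGURE ROUTE INFORMATION, enabling or disabling it
 * according to the "include" argument.
 */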
1114#define RRI_REQ_SIZE 16
1115#define RRI_RESP_SIZE 44
1116
1117static int sas_configure_present(struct domain_device *dev, int phy_id,
1118 u8 *sas_addr, int *index, int *present)
1119{
1120 int i, res = 0;
1121 struct expander_device *ex = &dev->ex_dev;
1122 struct ex_phy *phy = &ex->ex_phy[phy_id];
1123 u8 *rri_req;
1124 u8 *rri_resp;
1125
1126 *present = 0;
1127 *index = 0;
1128
1129 rri_req = alloc_smp_req(RRI_REQ_SIZE);
1130 if (!rri_req)
1131 return -ENOMEM;
1132
1133 rri_resp = alloc_smp_resp(RRI_RESP_SIZE);
1134 if (!rri_resp) {
1135 kfree(rri_req);
1136 return -ENOMEM;
1137 }
1138
1139 rri_req[1] = SMP_REPORT_ROUTE_INFO;
1140 rri_req[9] = phy_id;
1141
1142 for (i = 0; i < ex->max_route_indexes ; i++) {
1143 *(__be16 *)(rri_req+6) = cpu_to_be16(i);
1144 res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp,
1145 RRI_RESP_SIZE);
1146 if (res)
1147 goto out;
1148 res = rri_resp[2];
1149 if (res == SMP_RESP_NO_INDEX) {
1150 SAS_DPRINTK("overflow of indexes: dev %016llx "
1151 "phy 0x%x index 0x%x\n",
1152 SAS_ADDR(dev->sas_addr), phy_id, i);
1153 goto out;
1154 } else if (res != SMP_RESP_FUNC_ACC) {
1155 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
1156 "result 0x%x\n", __FUNCTION__,
1157 SAS_ADDR(dev->sas_addr), phy_id, i, res);
1158 goto out;
1159 }
1160 if (SAS_ADDR(sas_addr) != 0) {
1161 if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) {
1162 *index = i;
1163 if ((rri_resp[12] & 0x80) == 0x80)
1164 *present = 0;
1165 else
1166 *present = 1;
1167 goto out;
1168 } else if (SAS_ADDR(rri_resp+16) == 0) {
1169 *index = i;
1170 *present = 0;
1171 goto out;
1172 }
1173 } else if (SAS_ADDR(rri_resp+16) == 0 &&
1174 phy->last_da_index < i) {
1175 phy->last_da_index = i;
1176 *index = i;
1177 *present = 0;
1178 goto out;
1179 }
1180 }
1181 res = -1;
1182out:
1183 kfree(rri_req);
1184 kfree(rri_resp);
1185 return res;
1186}
1187
1188#define CRI_REQ_SIZE 44
1189#define CRI_RESP_SIZE 8
1190
1191static int sas_configure_set(struct domain_device *dev, int phy_id,
1192 u8 *sas_addr, int index, int include)
1193{
1194 int res;
1195 u8 *cri_req;
1196 u8 *cri_resp;
1197
1198 cri_req = alloc_smp_req(CRI_REQ_SIZE);
1199 if (!cri_req)
1200 return -ENOMEM;
1201
1202 cri_resp = alloc_smp_resp(CRI_RESP_SIZE);
1203 if (!cri_resp) {
1204 kfree(cri_req);
1205 return -ENOMEM;
1206 }
1207
1208 cri_req[1] = SMP_CONF_ROUTE_INFO;
1209 *(__be16 *)(cri_req+6) = cpu_to_be16(index);
1210 cri_req[9] = phy_id;
1211 if (SAS_ADDR(sas_addr) == 0 || !include)
1212 cri_req[12] |= 0x80;
1213 memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE);
1214
1215 res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp,
1216 CRI_RESP_SIZE);
1217 if (res)
1218 goto out;
1219 res = cri_resp[2];
1220 if (res == SMP_RESP_NO_INDEX) {
1221 SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x "
1222 "index 0x%x\n",
1223 SAS_ADDR(dev->sas_addr), phy_id, index);
1224 }
1225out:
1226 kfree(cri_req);
1227 kfree(cri_resp);
1228 return res;
1229}
1230
1231static int sas_configure_phy(struct domain_device *dev, int phy_id,
1232 u8 *sas_addr, int include)
1233{
1234 int index;
1235 int present;
1236 int res;
1237
1238 res = sas_configure_present(dev, phy_id, sas_addr, &index, &present);
1239 if (res)
1240 return res;
1241 if (include ^ present)
1242 return sas_configure_set(dev, phy_id, sas_addr, index,include);
1243
1244 return res;
1245}
1246
1247/**
1248 * sas_configure_parent -- configure routing table of parent
1249 * @parent: parent expander
1250 * @child: child expander
1251 * @sas_addr: SAS port identifier of the device directly attached to @child
1252 */
1253static int sas_configure_parent(struct domain_device *parent,
1254 struct domain_device *child,
1255 u8 *sas_addr, int include)
1256{
1257 struct expander_device *ex_parent = &parent->ex_dev;
1258 int res = 0;
1259 int i;
1260
1261 if (parent->parent) {
1262 res = sas_configure_parent(parent->parent, parent, sas_addr,
1263 include);
1264 if (res)
1265 return res;
1266 }
1267
1268 if (ex_parent->conf_route_table == 0) {
1269 SAS_DPRINTK("ex %016llx has self-configuring routing table\n",
1270 SAS_ADDR(parent->sas_addr));
1271 return 0;
1272 }
1273
1274 for (i = 0; i < ex_parent->num_phys; i++) {
1275 struct ex_phy *phy = &ex_parent->ex_phy[i];
1276
1277 if ((phy->routing_attr == TABLE_ROUTING) &&
1278 (SAS_ADDR(phy->attached_sas_addr) ==
1279 SAS_ADDR(child->sas_addr))) {
1280 res = sas_configure_phy(parent, i, sas_addr, include);
1281 if (res)
1282 return res;
1283 }
1284 }
1285
1286 return res;
1287}
1288
1289/**
1290 * sas_configure_routing -- configure routing
1291 * @dev: expander device
1292 * @sas_addr: port identifier of device directly attached to the expander device
1293 */
1294static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr)
1295{
1296 if (dev->parent)
1297 return sas_configure_parent(dev->parent, dev, sas_addr, 1);
1298 return 0;
1299}
1300
1301static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
1302{
1303 if (dev->parent)
1304 return sas_configure_parent(dev->parent, dev, sas_addr, 0);
1305 return 0;
1306}
1307
1308#if 0
1309#define SMP_BIN_ATTR_NAME "smp_portal"
1310
1311static void sas_ex_smp_hook(struct domain_device *dev)
1312{
1313 struct expander_device *ex_dev = &dev->ex_dev;
1314 struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr;
1315
1316 memset(bin_attr, 0, sizeof(*bin_attr));
1317
1318 bin_attr->attr.name = SMP_BIN_ATTR_NAME;
1319 bin_attr->attr.owner = THIS_MODULE;
1320 bin_attr->attr.mode = 0600;
1321
1322 bin_attr->size = 0;
1323 bin_attr->private = NULL;
1324 bin_attr->read = smp_portal_read;
1325 bin_attr->write= smp_portal_write;
1326 bin_attr->mmap = NULL;
1327
1328 ex_dev->smp_portal_pid = -1;
1329 init_MUTEX(&ex_dev->smp_sema);
1330}
1331#endif
1332
1333/**
1334 * sas_discover_expander -- expander discovery
1335 * @dev: pointer to the expander domain device
1336 *
1337 * See comment in sas_discover_sata().
1338 */
1339static int sas_discover_expander(struct domain_device *dev)
1340{
1341 int res;
1342
1343 res = sas_notify_lldd_dev_found(dev);
1344 if (res)
1345 return res;
1346
1347 res = sas_ex_general(dev);
1348 if (res)
1349 goto out_err;
1350 res = sas_ex_manuf_info(dev);
1351 if (res)
1352 goto out_err;
1353
1354 res = sas_expander_discover(dev);
1355 if (res) {
1356 SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n",
1357 SAS_ADDR(dev->sas_addr), res);
1358 goto out_err;
1359 }
1360
1361 sas_check_ex_subtractive_boundary(dev);
1362 res = sas_check_parent_topology(dev);
1363 if (res)
1364 goto out_err;
1365 return 0;
1366out_err:
1367 sas_notify_lldd_dev_gone(dev);
1368 return res;
1369}
1370
1371static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
1372{
1373 int res = 0;
1374 struct domain_device *dev;
1375
1376 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
1377 if (dev->dev_type == EDGE_DEV ||
1378 dev->dev_type == FANOUT_DEV) {
1379 struct sas_expander_device *ex =
1380 rphy_to_expander_device(dev->rphy);
1381
1382 if (level == ex->level)
1383 res = sas_ex_discover_devices(dev, -1);
1384 else if (level > 0)
1385 res = sas_ex_discover_devices(port->port_dev, -1);
1386
1387 }
1388 }
1389
1390 return res;
1391}
1392
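/*
 * Expander discovery proceeds breadth first: sas_ex_bfs_disc() repeatedly
 * discovers the devices at the current deepest level and keeps iterating
 * as long as a pass raises port->disc.max_level, i.e. as long as new,
 * deeper expanders keep turning up.
 */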
1393static int sas_ex_bfs_disc(struct asd_sas_port *port)
1394{
1395 int res;
1396 int level;
1397
1398 do {
1399 level = port->disc.max_level;
1400 res = sas_ex_level_discovery(port, level);
1401 mb();
1402 } while (level < port->disc.max_level);
1403
1404 return res;
1405}
1406
1407int sas_discover_root_expander(struct domain_device *dev)
1408{
1409 int res;
1410 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1411
1412 sas_rphy_add(dev->rphy);
1413
1414 ex->level = dev->port->disc.max_level; /* 0 */
1415 res = sas_discover_expander(dev);
1416 if (!res)
1417 sas_ex_bfs_disc(dev->port);
1418
1419 return res;
1420}
1421
1422/* ---------- Domain revalidation ---------- */
1423
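/*
 * Revalidation after a BROADCAST(CHANGE): sas_ex_revalidate_domain()
 * first finds the expander whose REPORT GENERAL expander change count
 * moved (sas_find_bcast_dev), then the individual phys whose DISCOVER
 * phy change counts moved (sas_find_bcast_phy), and rediscovers only
 * those phys via sas_rediscover().
 */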
1424static int sas_get_phy_discover(struct domain_device *dev,
1425 int phy_id, struct smp_resp *disc_resp)
1426{
1427 int res;
1428 u8 *disc_req;
1429
1430 disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
1431 if (!disc_req)
1432 return -ENOMEM;
1433
1434 disc_req[1] = SMP_DISCOVER;
1435 disc_req[9] = phy_id;
1436
1437 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
1438 disc_resp, DISCOVER_RESP_SIZE);
1439 if (res)
1440 goto out;
1441 else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
1442 res = disc_resp->result;
1443 goto out;
1444 }
1445out:
1446 kfree(disc_req);
1447 return res;
1448}
1449
1450static int sas_get_phy_change_count(struct domain_device *dev,
1451 int phy_id, int *pcc)
1452{
1453 int res;
1454 struct smp_resp *disc_resp;
1455
1456 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
1457 if (!disc_resp)
1458 return -ENOMEM;
1459
1460 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1461 if (!res)
1462 *pcc = disc_resp->disc.change_count;
1463
1464 kfree(disc_resp);
1465 return res;
1466}
1467
1468static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
1469 int phy_id, u8 *attached_sas_addr)
1470{
1471 int res;
1472 struct smp_resp *disc_resp;
1473 struct discover_resp *dr;
1474
1475 disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
1476 if (!disc_resp)
1477 return -ENOMEM;
1478 dr = &disc_resp->disc;
1479
1480 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1481 if (!res) {
1482 memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8);
1483 if (dr->attached_dev_type == 0)
1484 memset(attached_sas_addr, 0, 8);
1485 }
1486 kfree(disc_resp);
1487 return res;
1488}
1489
1490static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
1491 int from_phy)
1492{
1493 struct expander_device *ex = &dev->ex_dev;
1494 int res = 0;
1495 int i;
1496
1497 for (i = from_phy; i < ex->num_phys; i++) {
1498 int phy_change_count = 0;
1499
1500 res = sas_get_phy_change_count(dev, i, &phy_change_count);
1501 if (res)
1502 goto out;
1503 else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
1504 ex->ex_phy[i].phy_change_count = phy_change_count;
1505 *phy_id = i;
1506 return 0;
1507 }
1508 }
1509out:
1510 return res;
1511}
1512
1513static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
1514{
1515 int res;
1516 u8 *rg_req;
1517 struct smp_resp *rg_resp;
1518
1519 rg_req = alloc_smp_req(RG_REQ_SIZE);
1520 if (!rg_req)
1521 return -ENOMEM;
1522
1523 rg_resp = alloc_smp_resp(RG_RESP_SIZE);
1524 if (!rg_resp) {
1525 kfree(rg_req);
1526 return -ENOMEM;
1527 }
1528
1529 rg_req[1] = SMP_REPORT_GENERAL;
1530
1531 res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
1532 RG_RESP_SIZE);
1533 if (res)
1534 goto out;
1535 if (rg_resp->result != SMP_RESP_FUNC_ACC) {
1536 res = rg_resp->result;
1537 goto out;
1538 }
1539
1540 *ecc = be16_to_cpu(rg_resp->rg.change_count);
1541out:
1542 kfree(rg_resp);
1543 kfree(rg_req);
1544 return res;
1545}
1546
1547static int sas_find_bcast_dev(struct domain_device *dev,
1548 struct domain_device **src_dev)
1549{
1550 struct expander_device *ex = &dev->ex_dev;
1551 int ex_change_count = -1;
1552 int res;
1553
1554 res = sas_get_ex_change_count(dev, &ex_change_count);
1555 if (res)
1556 goto out;
1557 if (ex_change_count != -1 &&
1558 ex_change_count != ex->ex_change_count) {
1559 *src_dev = dev;
1560 ex->ex_change_count = ex_change_count;
1561 } else {
1562 struct domain_device *ch;
1563
1564 list_for_each_entry(ch, &ex->children, siblings) {
1565 if (ch->dev_type == EDGE_DEV ||
1566 ch->dev_type == FANOUT_DEV) {
1567 res = sas_find_bcast_dev(ch, src_dev);
1568				if (*src_dev)
1569 return res;
1570 }
1571 }
1572 }
1573out:
1574 return res;
1575}
1576
1577static void sas_unregister_ex_tree(struct domain_device *dev)
1578{
1579 struct expander_device *ex = &dev->ex_dev;
1580 struct domain_device *child, *n;
1581
1582 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1583 if (child->dev_type == EDGE_DEV ||
1584 child->dev_type == FANOUT_DEV)
1585 sas_unregister_ex_tree(child);
1586 else
1587 sas_unregister_dev(child);
1588 }
1589 sas_unregister_dev(dev);
1590}
1591
1592static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1593 int phy_id)
1594{
1595 struct expander_device *ex_dev = &parent->ex_dev;
1596 struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
1597 struct domain_device *child, *n;
1598
1599 list_for_each_entry_safe(child, n, &ex_dev->children, siblings) {
1600 if (SAS_ADDR(child->sas_addr) ==
1601 SAS_ADDR(phy->attached_sas_addr)) {
1602 if (child->dev_type == EDGE_DEV ||
1603 child->dev_type == FANOUT_DEV)
1604 sas_unregister_ex_tree(child);
1605 else
1606 sas_unregister_dev(child);
1607 break;
1608 }
1609 }
1610 sas_disable_routing(parent, phy->attached_sas_addr);
1611 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1612 sas_port_delete_phy(phy->port, phy->phy);
1613 if (phy->port->num_phys == 0)
1614 sas_port_delete(phy->port);
1615 phy->port = NULL;
1616}
1617
1618static int sas_discover_bfs_by_root_level(struct domain_device *root,
1619 const int level)
1620{
1621 struct expander_device *ex_root = &root->ex_dev;
1622 struct domain_device *child;
1623 int res = 0;
1624
1625 list_for_each_entry(child, &ex_root->children, siblings) {
1626 if (child->dev_type == EDGE_DEV ||
1627 child->dev_type == FANOUT_DEV) {
1628 struct sas_expander_device *ex =
1629 rphy_to_expander_device(child->rphy);
1630
1631 if (level > ex->level)
1632 res = sas_discover_bfs_by_root_level(child,
1633 level);
1634 else if (level == ex->level)
1635 res = sas_ex_discover_devices(child, -1);
1636 }
1637 }
1638 return res;
1639}
1640
1641static int sas_discover_bfs_by_root(struct domain_device *dev)
1642{
1643 int res;
1644 struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
1645 int level = ex->level+1;
1646
1647 res = sas_ex_discover_devices(dev, -1);
1648 if (res)
1649 goto out;
1650 do {
1651 res = sas_discover_bfs_by_root_level(dev, level);
1652 mb();
1653 level += 1;
1654 } while (level <= dev->port->disc.max_level);
1655out:
1656 return res;
1657}
1658
1659static int sas_discover_new(struct domain_device *dev, int phy_id)
1660{
1661 struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
1662 struct domain_device *child;
1663 int res;
1664
1665 SAS_DPRINTK("ex %016llx phy%d new device attached\n",
1666 SAS_ADDR(dev->sas_addr), phy_id);
1667 res = sas_ex_phy_discover(dev, phy_id);
1668 if (res)
1669 goto out;
1670 res = sas_ex_discover_devices(dev, phy_id);
1671 if (res)
1672 goto out;
1673 list_for_each_entry(child, &dev->ex_dev.children, siblings) {
1674 if (SAS_ADDR(child->sas_addr) ==
1675 SAS_ADDR(ex_phy->attached_sas_addr)) {
1676 if (child->dev_type == EDGE_DEV ||
1677 child->dev_type == FANOUT_DEV)
1678 res = sas_discover_bfs_by_root(child);
1679 break;
1680 }
1681 }
1682out:
1683 return res;
1684}
1685
1686static int sas_rediscover_dev(struct domain_device *dev, int phy_id)
1687{
1688 struct expander_device *ex = &dev->ex_dev;
1689 struct ex_phy *phy = &ex->ex_phy[phy_id];
1690 u8 attached_sas_addr[8];
1691 int res;
1692
1693 res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr);
1694 switch (res) {
1695 case SMP_RESP_NO_PHY:
1696 phy->phy_state = PHY_NOT_PRESENT;
1697 sas_unregister_devs_sas_addr(dev, phy_id);
1698 goto out; break;
1699 case SMP_RESP_PHY_VACANT:
1700 phy->phy_state = PHY_VACANT;
1701 sas_unregister_devs_sas_addr(dev, phy_id);
1702 goto out; break;
1703 case SMP_RESP_FUNC_ACC:
1704 break;
1705 }
1706
1707 if (SAS_ADDR(attached_sas_addr) == 0) {
1708 phy->phy_state = PHY_EMPTY;
1709 sas_unregister_devs_sas_addr(dev, phy_id);
1710 } else if (SAS_ADDR(attached_sas_addr) ==
1711 SAS_ADDR(phy->attached_sas_addr)) {
1712 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n",
1713 SAS_ADDR(dev->sas_addr), phy_id);
1714 sas_ex_phy_discover(dev, phy_id);
1715 } else
1716 res = sas_discover_new(dev, phy_id);
1717out:
1718 return res;
1719}
1720
1721static int sas_rediscover(struct domain_device *dev, const int phy_id)
1722{
1723 struct expander_device *ex = &dev->ex_dev;
1724 struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
1725 int res = 0;
1726 int i;
1727
1728 SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n",
1729 SAS_ADDR(dev->sas_addr), phy_id);
1730
1731 if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
1732 for (i = 0; i < ex->num_phys; i++) {
1733 struct ex_phy *phy = &ex->ex_phy[i];
1734
1735 if (i == phy_id)
1736 continue;
1737 if (SAS_ADDR(phy->attached_sas_addr) ==
1738 SAS_ADDR(changed_phy->attached_sas_addr)) {
1739 SAS_DPRINTK("phy%d part of wide port with "
1740 "phy%d\n", phy_id, i);
1741 goto out;
1742 }
1743 }
1744 res = sas_rediscover_dev(dev, phy_id);
1745 } else
1746 res = sas_discover_new(dev, phy_id);
1747out:
1748 return res;
1749}
1750
1751/**
1752 * sas_ex_revalidate_domain -- revalidate the domain below a port device
1753 * @port_dev: the port's root domain device
1754 *
1755 * NOTE: this process _must_ quit (return) as soon as any connection
1756 * errors are encountered. Connection recovery is done elsewhere.
1757 * Discover process only interrogates devices in order to discover the
1758 * domain.
1759 */
1760int sas_ex_revalidate_domain(struct domain_device *port_dev)
1761{
1762 int res;
1763 struct domain_device *dev = NULL;
1764
1765 res = sas_find_bcast_dev(port_dev, &dev);
1766 if (res)
1767 goto out;
1768 if (dev) {
1769 struct expander_device *ex = &dev->ex_dev;
1770 int i = 0, phy_id;
1771
1772 do {
1773 phy_id = -1;
1774 res = sas_find_bcast_phy(dev, &phy_id, i);
1775 if (phy_id == -1)
1776 break;
1777 res = sas_rediscover(dev, phy_id);
1778 i = phy_id + 1;
1779 } while (i < ex->num_phys);
1780 }
1781out:
1782 return res;
1783}
1784
1785#if 0
1786/* ---------- SMP portal ---------- */
1787
1788static ssize_t smp_portal_write(struct kobject *kobj, char *buf, loff_t offs,
1789 size_t size)
1790{
1791 struct domain_device *dev = to_dom_device(kobj);
1792 struct expander_device *ex = &dev->ex_dev;
1793
1794 if (offs != 0)
1795 return -EFBIG;
1796 else if (size == 0)
1797 return 0;
1798
1799 down_interruptible(&ex->smp_sema);
1800 if (ex->smp_req)
1801 kfree(ex->smp_req);
1802 ex->smp_req = kzalloc(size, GFP_USER);
1803 if (!ex->smp_req) {
1804 up(&ex->smp_sema);
1805 return -ENOMEM;
1806 }
1807 memcpy(ex->smp_req, buf, size);
1808 ex->smp_req_size = size;
1809 ex->smp_portal_pid = current->pid;
1810 up(&ex->smp_sema);
1811
1812 return size;
1813}
1814
1815static ssize_t smp_portal_read(struct kobject *kobj, char *buf, loff_t offs,
1816 size_t size)
1817{
1818 struct domain_device *dev = to_dom_device(kobj);
1819 struct expander_device *ex = &dev->ex_dev;
1820 u8 *smp_resp;
1821 int res = -EINVAL;
1822
1823 /* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact
1824 * it should be 0.
1825 */
1826
1827 down_interruptible(&ex->smp_sema);
1828 if (!ex->smp_req || ex->smp_portal_pid != current->pid)
1829 goto out;
1830
1831 res = 0;
1832 if (size == 0)
1833 goto out;
1834
1835 res = -ENOMEM;
1836 smp_resp = alloc_smp_resp(size);
1837 if (!smp_resp)
1838 goto out;
1839 res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size,
1840 smp_resp, size);
1841 if (!res) {
1842 memcpy(buf, smp_resp, size);
1843 res = size;
1844 }
1845
1846 kfree(smp_resp);
1847out:
1848 kfree(ex->smp_req);
1849 ex->smp_req = NULL;
1850 ex->smp_req_size = 0;
1851 ex->smp_portal_pid = -1;
1852 up(&ex->smp_sema);
1853 return res;
1854}
1855#endif
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
new file mode 100644
index 000000000000..c836a237fb79
--- /dev/null
+++ b/drivers/scsi/libsas/sas_init.c
@@ -0,0 +1,267 @@
1/*
2 * Serial Attached SCSI (SAS) Transport Layer initialization
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/device.h>
29#include <linux/spinlock.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34
35#include "sas_internal.h"
36
37#include "../scsi_sas_internal.h"
38
39kmem_cache_t *sas_task_cache;
40
41/*------------ SAS addr hash -----------*/
42void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
43{
44 const u32 poly = 0x00DB2777;
45 u32 r = 0;
46 int i;
47
48 for (i = 0; i < 8; i++) {
49 int b;
50 for (b = 7; b >= 0; b--) {
51 r <<= 1;
52 if ((1 << b) & sas_addr[i]) {
53 if (!(r & 0x01000000))
54 r ^= poly;
55 } else if (r & 0x01000000)
56 r ^= poly;
57 }
58 }
59
60 hashed[0] = (r >> 16) & 0xFF;
61 hashed[1] = (r >> 8) & 0xFF ;
62 hashed[2] = r & 0xFF;
63}
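
A minimal, hypothetical illustration of the hash above (not part of this patch; the address bytes are invented): the routine folds an 8-byte SAS address into the 3-byte hashed form carried in SAS frames.

static void example_sas_hash(void)
{
	/* Illustrative sketch only -- the SAS address below is made up. */
	const u8 sas_addr[8] = { 0x50, 0x01, 0x23, 0x45,
				 0x67, 0x89, 0xab, 0xcd };
	u8 hashed[3];

	sas_hash_addr(hashed, sas_addr);
	/* hashed[0..2] now holds the 24-bit hash of sas_addr. */
}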
64
65
66/* ---------- HA events ---------- */
67
68void sas_hae_reset(void *data)
69{
70 struct sas_ha_struct *ha = data;
71
72 sas_begin_event(HAE_RESET, &ha->event_lock,
73 &ha->pending);
74}
75
76int sas_register_ha(struct sas_ha_struct *sas_ha)
77{
78 int error = 0;
79
80 spin_lock_init(&sas_ha->phy_port_lock);
81 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
82
83 if (sas_ha->lldd_queue_size == 0)
84 sas_ha->lldd_queue_size = 1;
85 else if (sas_ha->lldd_queue_size == -1)
86 sas_ha->lldd_queue_size = 128; /* Sanity */
87
88 error = sas_register_phys(sas_ha);
89 if (error) {
90 printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
91 return error;
92 }
93
94 error = sas_register_ports(sas_ha);
95 if (error) {
96 printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
97 goto Undo_phys;
98 }
99
100 error = sas_init_events(sas_ha);
101 if (error) {
102 printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
103 goto Undo_ports;
104 }
105
106 if (sas_ha->lldd_max_execute_num > 1) {
107 error = sas_init_queue(sas_ha);
108 if (error) {
109 printk(KERN_NOTICE "couldn't start queue thread:%d, "
110 "running in direct mode\n", error);
111 sas_ha->lldd_max_execute_num = 1;
112 }
113 }
114
115 return 0;
116
117Undo_ports:
118 sas_unregister_ports(sas_ha);
119Undo_phys:
120
121 return error;
122}
123
124int sas_unregister_ha(struct sas_ha_struct *sas_ha)
125{
126 if (sas_ha->lldd_max_execute_num > 1) {
127 sas_shutdown_queue(sas_ha);
128 }
129
130 sas_unregister_ports(sas_ha);
131
132 return 0;
133}
134
135static int sas_get_linkerrors(struct sas_phy *phy)
136{
137 if (scsi_is_sas_phy_local(phy))
138 /* FIXME: we have no local phy stats
139 * gathering at this time */
140 return -EINVAL;
141
142 return sas_smp_get_phy_events(phy);
143}
144
145static int sas_phy_reset(struct sas_phy *phy, int hard_reset)
146{
147 int ret;
148 enum phy_func reset_type;
149
150 if (hard_reset)
151 reset_type = PHY_FUNC_HARD_RESET;
152 else
153 reset_type = PHY_FUNC_LINK_RESET;
154
155 if (scsi_is_sas_phy_local(phy)) {
156 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
157 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
158 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
159 struct sas_internal *i =
160 to_sas_internal(sas_ha->core.shost->transportt);
161
162 ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
163 } else {
164 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
165 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
166 ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
167 }
168 return ret;
169}
170
171static int sas_set_phy_speed(struct sas_phy *phy,
172 struct sas_phy_linkrates *rates)
173{
174 int ret;
175
176 if ((rates->minimum_linkrate &&
177 rates->minimum_linkrate > phy->maximum_linkrate) ||
178 (rates->maximum_linkrate &&
179 rates->maximum_linkrate < phy->minimum_linkrate))
180 return -EINVAL;
181
182 if (rates->minimum_linkrate &&
183 rates->minimum_linkrate < phy->minimum_linkrate_hw)
184 rates->minimum_linkrate = phy->minimum_linkrate_hw;
185
186 if (rates->maximum_linkrate &&
187 rates->maximum_linkrate > phy->maximum_linkrate_hw)
188 rates->maximum_linkrate = phy->maximum_linkrate_hw;
189
190 if (scsi_is_sas_phy_local(phy)) {
191 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
192 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
193 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
194 struct sas_internal *i =
195 to_sas_internal(sas_ha->core.shost->transportt);
196
197 ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
198 rates);
199 } else {
200 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
201 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
202 ret = sas_smp_phy_control(ddev, phy->number,
203 PHY_FUNC_LINK_RESET, rates);
204
205 }
206
207 return ret;
208}
209
210static struct sas_function_template sft = {
211 .phy_reset = sas_phy_reset,
212 .set_phy_speed = sas_set_phy_speed,
213 .get_linkerrors = sas_get_linkerrors,
214};
215
216struct scsi_transport_template *
217sas_domain_attach_transport(struct sas_domain_function_template *dft)
218{
219 struct scsi_transport_template *stt = sas_attach_transport(&sft);
220 struct sas_internal *i;
221
222 if (!stt)
223 return stt;
224
225 i = to_sas_internal(stt);
226 i->dft = dft;
227 stt->create_work_queue = 1;
228 stt->eh_timed_out = sas_scsi_timed_out;
229 stt->eh_strategy_handler = sas_scsi_recover_host;
230
231 return stt;
232}
233EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
234
235
236void sas_domain_release_transport(struct scsi_transport_template *stt)
237{
238 sas_release_transport(stt);
239}
240EXPORT_SYMBOL_GPL(sas_domain_release_transport);
241
242/* ---------- SAS Class register/unregister ---------- */
243
244static int __init sas_class_init(void)
245{
246 sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
247 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
248 if (!sas_task_cache)
249 return -ENOMEM;
250
251 return 0;
252}
253
254static void __exit sas_class_exit(void)
255{
256 kmem_cache_destroy(sas_task_cache);
257}
258
259MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
260MODULE_DESCRIPTION("SAS Transport Layer");
261MODULE_LICENSE("GPL v2");
262
263module_init(sas_class_init);
264module_exit(sas_class_exit);
265
266EXPORT_SYMBOL_GPL(sas_register_ha);
267EXPORT_SYMBOL_GPL(sas_unregister_ha);
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
new file mode 100644
index 000000000000..bffcee474921
--- /dev/null
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -0,0 +1,146 @@
1/*
2 * Serial Attached SCSI (SAS) class internal header file
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#ifndef _SAS_INTERNAL_H_
27#define _SAS_INTERNAL_H_
28
29#include <scsi/scsi.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_sas.h>
32#include <scsi/libsas.h>
33
34#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
35
36#ifdef SAS_DEBUG
37#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
38#else
39#define SAS_DPRINTK(fmt, ...)
40#endif
41
42void sas_scsi_recover_host(struct Scsi_Host *shost);
43
44int sas_show_class(enum sas_class class, char *buf);
45int sas_show_proto(enum sas_proto proto, char *buf);
46int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
47int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
48
49int sas_register_phys(struct sas_ha_struct *sas_ha);
50void sas_unregister_phys(struct sas_ha_struct *sas_ha);
51
52int sas_register_ports(struct sas_ha_struct *sas_ha);
53void sas_unregister_ports(struct sas_ha_struct *sas_ha);
54
55enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
56
57int sas_init_queue(struct sas_ha_struct *sas_ha);
58int sas_init_events(struct sas_ha_struct *sas_ha);
59void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
60
61void sas_deform_port(struct asd_sas_phy *phy);
62
63void sas_porte_bytes_dmaed(void *);
64void sas_porte_broadcast_rcvd(void *);
65void sas_porte_link_reset_err(void *);
66void sas_porte_timer_event(void *);
67void sas_porte_hard_reset(void *);
68
69int sas_notify_lldd_dev_found(struct domain_device *);
70void sas_notify_lldd_dev_gone(struct domain_device *);
71
72int sas_smp_phy_control(struct domain_device *dev, int phy_id,
73 enum phy_func phy_func, struct sas_phy_linkrates *);
74int sas_smp_get_phy_events(struct sas_phy *phy);
75
76struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
77
78void sas_hae_reset(void *);
79
80static inline void sas_queue_event(int event, spinlock_t *lock,
81 unsigned long *pending,
82 struct work_struct *work,
83 struct Scsi_Host *shost)
84{
85 unsigned long flags;
86
87 spin_lock_irqsave(lock, flags);
88 if (test_bit(event, pending)) {
89 spin_unlock_irqrestore(lock, flags);
90 return;
91 }
92 __set_bit(event, pending);
93 spin_unlock_irqrestore(lock, flags);
94 scsi_queue_work(shost, work);
95}
96
97static inline void sas_begin_event(int event, spinlock_t *lock,
98 unsigned long *pending)
99{
100 unsigned long flags;
101
102 spin_lock_irqsave(lock, flags);
103 __clear_bit(event, pending);
104 spin_unlock_irqrestore(lock, flags);
105}
106
107static inline void sas_fill_in_rphy(struct domain_device *dev,
108 struct sas_rphy *rphy)
109{
110 rphy->identify.sas_address = SAS_ADDR(dev->sas_addr);
111 rphy->identify.initiator_port_protocols = dev->iproto;
112 rphy->identify.target_port_protocols = dev->tproto;
113 switch (dev->dev_type) {
114 case SATA_DEV:
115 /* FIXME: need sata device type */
116 case SAS_END_DEV:
117 rphy->identify.device_type = SAS_END_DEVICE;
118 break;
119 case EDGE_DEV:
120 rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
121 break;
122 case FANOUT_DEV:
123 rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
124 break;
125 default:
126 rphy->identify.device_type = SAS_PHY_UNUSED;
127 break;
128 }
129}
130
131static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
132{
133 struct expander_device *ex = &dev->ex_dev;
134 struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
135
136 if (!ex->parent_port) {
137 ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id);
138 /* FIXME: error handling */
139 BUG_ON(!ex->parent_port);
140 BUG_ON(sas_port_add(ex->parent_port));
141 sas_port_mark_backlink(ex->parent_port);
142 }
143 sas_port_add_phy(ex->parent_port, ex_phy->phy);
144}
145
146#endif /* _SAS_INTERNAL_H_ */
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
new file mode 100644
index 000000000000..9340cdbae4a3
--- /dev/null
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -0,0 +1,158 @@
1/*
2 * Serial Attached SCSI (SAS) Phy class
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h"
30
31/* ---------- Phy events ---------- */
32
33static void sas_phye_loss_of_signal(void *data)
34{
35 struct asd_sas_phy *phy = data;
36
37 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
38 &phy->phy_events_pending);
39 phy->error = 0;
40 sas_deform_port(phy);
41}
42
43static void sas_phye_oob_done(void *data)
44{
45 struct asd_sas_phy *phy = data;
46
47 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
48 &phy->phy_events_pending);
49 phy->error = 0;
50}
51
52static void sas_phye_oob_error(void *data)
53{
54 struct asd_sas_phy *phy = data;
55 struct sas_ha_struct *sas_ha = phy->ha;
56 struct asd_sas_port *port = phy->port;
57 struct sas_internal *i =
58 to_sas_internal(sas_ha->core.shost->transportt);
59
60 sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock,
61 &phy->phy_events_pending);
62
63 sas_deform_port(phy);
64
65 if (!port && phy->enabled && i->dft->lldd_control_phy) {
66 phy->error++;
67 switch (phy->error) {
68 case 1:
69 case 2:
70 i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET,
71 NULL);
72 break;
73 case 3:
74 default:
75 phy->error = 0;
76 phy->enabled = 0;
77 i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
78 break;
79 }
80 }
81}
82
83static void sas_phye_spinup_hold(void *data)
84{
85 struct asd_sas_phy *phy = data;
86 struct sas_ha_struct *sas_ha = phy->ha;
87 struct sas_internal *i =
88 to_sas_internal(sas_ha->core.shost->transportt);
89
90 sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock,
91 &phy->phy_events_pending);
92
93 phy->error = 0;
94 i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
95}
96
97/* ---------- Phy class registration ---------- */
98
99int sas_register_phys(struct sas_ha_struct *sas_ha)
100{
101 int i;
102
103 static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
104 [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
105 [PHYE_OOB_DONE] = sas_phye_oob_done,
106 [PHYE_OOB_ERROR] = sas_phye_oob_error,
107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
108 };
109
110 static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
111 [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
112 [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
113 [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
114 [PORTE_TIMER_EVENT] = sas_porte_timer_event,
115 [PORTE_HARD_RESET] = sas_porte_hard_reset,
116 };
117
118 /* Now register the phys. */
119 for (i = 0; i < sas_ha->num_phys; i++) {
120 int k;
121 struct asd_sas_phy *phy = sas_ha->sas_phy[i];
122
123 phy->error = 0;
124 INIT_LIST_HEAD(&phy->port_phy_el);
125 for (k = 0; k < PORT_NUM_EVENTS; k++)
126 INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
127 phy);
128
129 for (k = 0; k < PHY_NUM_EVENTS; k++)
130 INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
131 phy);
132 phy->port = NULL;
133 phy->ha = sas_ha;
134 spin_lock_init(&phy->frame_rcvd_lock);
135 spin_lock_init(&phy->sas_prim_lock);
136 phy->frame_rcvd_size = 0;
137
138 phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev,
139 i);
140 if (!phy->phy)
141 return -ENOMEM;
142
143 phy->phy->identify.initiator_port_protocols =
144 phy->iproto;
145 phy->phy->identify.target_port_protocols = phy->tproto;
146 phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr);
147 phy->phy->identify.phy_identifier = i;
148 phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
149 phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
150 phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
151 phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
152 phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
153
154 sas_phy_add(phy->phy);
155 }
156
157 return 0;
158}
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
new file mode 100644
index 000000000000..253cdcf306a2
--- /dev/null
+++ b/drivers/scsi/libsas/sas_port.c
@@ -0,0 +1,279 @@
1/*
2 * Serial Attached SCSI (SAS) Port class
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 *
23 */
24
25#include "sas_internal.h"
26
27#include <scsi/scsi_transport.h>
28#include <scsi/scsi_transport_sas.h>
29#include "../scsi_sas_internal.h"
30
31/**
32 * sas_form_port -- add this phy to a port
33 * @phy: the phy of interest
34 *
 35 * This function adds the phy to an existing port, thus creating a wide
 36 * port, or it creates a new port and adds the phy to it.
37 */
38static void sas_form_port(struct asd_sas_phy *phy)
39{
40 int i;
41 struct sas_ha_struct *sas_ha = phy->ha;
42 struct asd_sas_port *port = phy->port;
43 struct sas_internal *si =
44 to_sas_internal(sas_ha->core.shost->transportt);
45
46 if (port) {
47 if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
48 SAS_ADDR_SIZE) == 0)
49 sas_deform_port(phy);
50 else {
 51			SAS_DPRINTK("%s: phy%d belongs to port%d already (%d)!\n",
52 __FUNCTION__, phy->id, phy->port->id,
53 phy->port->num_phys);
54 return;
55 }
56 }
57
58 /* find a port */
59 spin_lock(&sas_ha->phy_port_lock);
60 for (i = 0; i < sas_ha->num_phys; i++) {
61 port = sas_ha->sas_port[i];
62 spin_lock(&port->phy_list_lock);
63 if (*(u64 *) port->sas_addr &&
64 memcmp(port->attached_sas_addr,
65 phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 &&
66 port->num_phys > 0) {
67 /* wide port */
68 SAS_DPRINTK("phy%d matched wide port%d\n", phy->id,
69 port->id);
70 break;
71 } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) {
72 memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
73 break;
74 }
75 spin_unlock(&port->phy_list_lock);
76 }
77
78 if (i >= sas_ha->num_phys) {
79 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
80 __FUNCTION__);
81 spin_unlock(&sas_ha->phy_port_lock);
82 return;
83 }
84
85 /* add the phy to the port */
86 list_add_tail(&phy->port_phy_el, &port->phy_list);
87 phy->port = port;
88 port->num_phys++;
89 port->phy_mask |= (1U << phy->id);
90
91 if (!port->phy)
92 port->phy = phy->phy;
93
94 SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id,
95 port->id, port->phy_mask);
96
97 if (*(u64 *)port->attached_sas_addr == 0) {
98 port->class = phy->class;
99 memcpy(port->attached_sas_addr, phy->attached_sas_addr,
100 SAS_ADDR_SIZE);
101 port->iproto = phy->iproto;
102 port->tproto = phy->tproto;
103 port->oob_mode = phy->oob_mode;
104 port->linkrate = phy->linkrate;
105 } else
106 port->linkrate = max(port->linkrate, phy->linkrate);
107 spin_unlock(&port->phy_list_lock);
108 spin_unlock(&sas_ha->phy_port_lock);
109
110 if (!port->port) {
111 port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
112 BUG_ON(!port->port);
113 sas_port_add(port->port);
114 }
115 sas_port_add_phy(port->port, phy->phy);
116
117 if (port->port_dev)
118 port->port_dev->pathways = port->num_phys;
119
120 /* Tell the LLDD about this port formation. */
121 if (si->dft->lldd_port_formed)
122 si->dft->lldd_port_formed(phy);
123
124 sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
125}
126
127/**
128 * sas_deform_port -- remove this phy from the port it belongs to
129 * @phy: the phy of interest
130 *
131 * This is called when the physical link to the other phy has been
132 * lost (on this phy), in Event thread context. We cannot delay here.
133 */
134void sas_deform_port(struct asd_sas_phy *phy)
135{
136 struct sas_ha_struct *sas_ha = phy->ha;
137 struct asd_sas_port *port = phy->port;
138 struct sas_internal *si =
139 to_sas_internal(sas_ha->core.shost->transportt);
140
141 if (!port)
142 return; /* done by a phy event */
143
144 if (port->port_dev)
145 port->port_dev->pathways--;
146
147 if (port->num_phys == 1) {
148 sas_unregister_domain_devices(port);
149 sas_port_delete(port->port);
150 port->port = NULL;
151 } else
152 sas_port_delete_phy(port->port, phy->phy);
153
154
155 if (si->dft->lldd_port_deformed)
156 si->dft->lldd_port_deformed(phy);
157
158 spin_lock(&sas_ha->phy_port_lock);
159 spin_lock(&port->phy_list_lock);
160
161 list_del_init(&phy->port_phy_el);
162 phy->port = NULL;
163 port->num_phys--;
164 port->phy_mask &= ~(1U << phy->id);
165
166 if (port->num_phys == 0) {
167 INIT_LIST_HEAD(&port->phy_list);
168 memset(port->sas_addr, 0, SAS_ADDR_SIZE);
169 memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
170 port->class = 0;
171 port->iproto = 0;
172 port->tproto = 0;
173 port->oob_mode = 0;
174 port->phy_mask = 0;
175 }
176 spin_unlock(&port->phy_list_lock);
177 spin_unlock(&sas_ha->phy_port_lock);
178
179 return;
180}
181
182/* ---------- SAS port events ---------- */
183
184void sas_porte_bytes_dmaed(void *data)
185{
186 struct asd_sas_phy *phy = data;
187
188 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
189 &phy->port_events_pending);
190
191 sas_form_port(phy);
192}
193
194void sas_porte_broadcast_rcvd(void *data)
195{
196 unsigned long flags;
197 u32 prim;
198 struct asd_sas_phy *phy = data;
199
200 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
201 &phy->port_events_pending);
202
203 spin_lock_irqsave(&phy->sas_prim_lock, flags);
204 prim = phy->sas_prim;
205 spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
206
207 SAS_DPRINTK("broadcast received: %d\n", prim);
208 sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
209}
210
211void sas_porte_link_reset_err(void *data)
212{
213 struct asd_sas_phy *phy = data;
214
215 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
216 &phy->port_events_pending);
217
218 sas_deform_port(phy);
219}
220
221void sas_porte_timer_event(void *data)
222{
223 struct asd_sas_phy *phy = data;
224
225 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
226 &phy->port_events_pending);
227
228 sas_deform_port(phy);
229}
230
231void sas_porte_hard_reset(void *data)
232{
233 struct asd_sas_phy *phy = data;
234
235 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
236 &phy->port_events_pending);
237
238 sas_deform_port(phy);
239}
240
241/* ---------- SAS port registration ---------- */
242
243static void sas_init_port(struct asd_sas_port *port,
244 struct sas_ha_struct *sas_ha, int i)
245{
246 port->id = i;
247 INIT_LIST_HEAD(&port->dev_list);
248 spin_lock_init(&port->phy_list_lock);
249 INIT_LIST_HEAD(&port->phy_list);
250 port->num_phys = 0;
251 port->phy_mask = 0;
252 port->ha = sas_ha;
253
254 spin_lock_init(&port->dev_list_lock);
255}
256
257int sas_register_ports(struct sas_ha_struct *sas_ha)
258{
259 int i;
260
261 /* initialize the ports and discovery */
262 for (i = 0; i < sas_ha->num_phys; i++) {
263 struct asd_sas_port *port = sas_ha->sas_port[i];
264
265 sas_init_port(port, sas_ha, i);
266 sas_init_disc(&port->disc, port);
267 }
268 return 0;
269}
270
271void sas_unregister_ports(struct sas_ha_struct *sas_ha)
272{
273 int i;
274
275 for (i = 0; i < sas_ha->num_phys; i++)
276 if (sas_ha->sas_phy[i]->port)
277 sas_deform_port(sas_ha->sas_phy[i]);
278
279}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
new file mode 100644
index 000000000000..43e0e4e36934
--- /dev/null
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -0,0 +1,786 @@
1/*
2 * Serial Attached SCSI (SAS) class SCSI Host glue.
3 *
4 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
5 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
6 *
7 * This file is licensed under GPLv2.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
22 * USA
23 *
24 */
25
26#include "sas_internal.h"
27
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_tcq.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_sas.h>
34#include "../scsi_sas_internal.h"
35
36#include <linux/err.h>
37#include <linux/blkdev.h>
38#include <linux/scatterlist.h>
39
40/* ---------- SCSI Host glue ---------- */
41
42#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
43#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
44
45static void sas_scsi_task_done(struct sas_task *task)
46{
47 struct task_status_struct *ts = &task->task_status;
48 struct scsi_cmnd *sc = task->uldd_task;
49 unsigned ts_flags = task->task_state_flags;
50 int hs = 0, stat = 0;
51
52 if (unlikely(!sc)) {
 53		SAS_DPRINTK("task_done called with nonexistent SCSI cmnd!\n");
54 list_del_init(&task->list);
55 sas_free_task(task);
56 return;
57 }
58
59 if (ts->resp == SAS_TASK_UNDELIVERED) {
60 /* transport error */
61 hs = DID_NO_CONNECT;
62 } else { /* ts->resp == SAS_TASK_COMPLETE */
63 /* task delivered, what happened afterwards? */
64 switch (ts->stat) {
65 case SAS_DEV_NO_RESPONSE:
66 case SAS_INTERRUPTED:
67 case SAS_PHY_DOWN:
68 case SAS_NAK_R_ERR:
69 case SAS_OPEN_TO:
70 hs = DID_NO_CONNECT;
71 break;
72 case SAS_DATA_UNDERRUN:
73 sc->resid = ts->residual;
74 if (sc->request_bufflen - sc->resid < sc->underflow)
75 hs = DID_ERROR;
76 break;
77 case SAS_DATA_OVERRUN:
78 hs = DID_ERROR;
79 break;
80 case SAS_QUEUE_FULL:
81 hs = DID_SOFT_ERROR; /* retry */
82 break;
83 case SAS_DEVICE_UNKNOWN:
84 hs = DID_BAD_TARGET;
85 break;
86 case SAS_SG_ERR:
87 hs = DID_PARITY;
88 break;
89 case SAS_OPEN_REJECT:
90 if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
91 hs = DID_SOFT_ERROR; /* retry */
92 else
93 hs = DID_ERROR;
94 break;
95 case SAS_PROTO_RESPONSE:
96 SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
97 "task; please report this\n",
98 task->dev->port->ha->sas_ha_name);
99 break;
100 case SAS_ABORTED_TASK:
101 hs = DID_ABORT;
102 break;
103 case SAM_CHECK_COND:
104 memcpy(sc->sense_buffer, ts->buf,
 105			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
106 stat = SAM_CHECK_COND;
107 break;
108 default:
109 stat = ts->stat;
110 break;
111 }
112 }
113 ASSIGN_SAS_TASK(sc, NULL);
114 sc->result = (hs << 16) | stat;
115 list_del_init(&task->list);
116 sas_free_task(task);
117 /* This is very ugly but this is how SCSI Core works. */
118 if (ts_flags & SAS_TASK_STATE_ABORTED)
119 scsi_finish_command(sc);
120 else
121 sc->scsi_done(sc);
122}
123
124static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
125{
126 enum task_attribute ta = TASK_ATTR_SIMPLE;
127 if (cmd->request && blk_rq_tagged(cmd->request)) {
128 if (cmd->device->ordered_tags &&
129 (cmd->request->flags & REQ_HARDBARRIER))
130 ta = TASK_ATTR_HOQ;
131 }
132 return ta;
133}
134
135static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
136 struct domain_device *dev,
137 unsigned long gfp_flags)
138{
139 struct sas_task *task = sas_alloc_task(gfp_flags);
140 struct scsi_lun lun;
141
142 if (!task)
143 return NULL;
144
145 *(u32 *)cmd->sense_buffer = 0;
146 task->uldd_task = cmd;
147 ASSIGN_SAS_TASK(cmd, task);
148
149 task->dev = dev;
150 task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
151
152 task->ssp_task.retry_count = 1;
153 int_to_scsilun(cmd->device->lun, &lun);
154 memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
155 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
156 memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
157
158 task->scatter = cmd->request_buffer;
159 task->num_scatter = cmd->use_sg;
160 task->total_xfer_len = cmd->request_bufflen;
161 task->data_dir = cmd->sc_data_direction;
162
163 task->task_done = sas_scsi_task_done;
164
165 return task;
166}
167
168static int sas_queue_up(struct sas_task *task)
169{
170 struct sas_ha_struct *sas_ha = task->dev->port->ha;
171 struct scsi_core *core = &sas_ha->core;
172 unsigned long flags;
173 LIST_HEAD(list);
174
175 spin_lock_irqsave(&core->task_queue_lock, flags);
176 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
177 spin_unlock_irqrestore(&core->task_queue_lock, flags);
178 return -SAS_QUEUE_FULL;
179 }
180 list_add_tail(&task->list, &core->task_queue);
181 core->task_queue_size += 1;
182 spin_unlock_irqrestore(&core->task_queue_lock, flags);
183 up(&core->queue_thread_sema);
184
185 return 0;
186}
187
188/**
189 * sas_queuecommand -- Enqueue a command for processing
190 * @parameters: See SCSI Core documentation
191 *
192 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
193 * call us without holding an IRQ spinlock...
194 */
195int sas_queuecommand(struct scsi_cmnd *cmd,
196 void (*scsi_done)(struct scsi_cmnd *))
197{
198 int res = 0;
199 struct domain_device *dev = cmd_to_domain_dev(cmd);
200 struct Scsi_Host *host = cmd->device->host;
201 struct sas_internal *i = to_sas_internal(host->transportt);
202
203 spin_unlock_irq(host->host_lock);
204
205 {
206 struct sas_ha_struct *sas_ha = dev->port->ha;
207 struct sas_task *task;
208
209 res = -ENOMEM;
210 task = sas_create_task(cmd, dev, GFP_ATOMIC);
211 if (!task)
212 goto out;
213
214 cmd->scsi_done = scsi_done;
215 /* Queue up, Direct Mode or Task Collector Mode. */
216 if (sas_ha->lldd_max_execute_num < 2)
217 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
218 else
219 res = sas_queue_up(task);
220
221 /* Examine */
222 if (res) {
223 SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
224 ASSIGN_SAS_TASK(cmd, NULL);
225 sas_free_task(task);
226 if (res == -SAS_QUEUE_FULL) {
227 cmd->result = DID_SOFT_ERROR << 16; /* retry */
228 res = 0;
229 scsi_done(cmd);
230 }
231 goto out;
232 }
233 }
234out:
235 spin_lock_irq(host->host_lock);
236 return res;
237}
238
239static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
240{
241 struct scsi_cmnd *cmd, *n;
242
243 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
244 if (cmd == my_cmd)
245 list_del_init(&cmd->eh_entry);
246 }
247}
248
249static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
250 struct domain_device *dev)
251{
252 struct scsi_cmnd *cmd, *n;
253
254 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
255 struct domain_device *x = cmd_to_domain_dev(cmd);
256
257 if (x == dev)
258 list_del_init(&cmd->eh_entry);
259 }
260}
261
262static void sas_scsi_clear_queue_port(struct list_head *error_q,
263 struct asd_sas_port *port)
264{
265 struct scsi_cmnd *cmd, *n;
266
267 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
268 struct domain_device *dev = cmd_to_domain_dev(cmd);
269 struct asd_sas_port *x = dev->port;
270
271 if (x == port)
272 list_del_init(&cmd->eh_entry);
273 }
274}
275
276enum task_disposition {
277 TASK_IS_DONE,
278 TASK_IS_ABORTED,
279 TASK_IS_AT_LU,
280 TASK_IS_NOT_AT_LU,
281};
282
283static enum task_disposition sas_scsi_find_task(struct sas_task *task)
284{
285 struct sas_ha_struct *ha = task->dev->port->ha;
286 unsigned long flags;
287 int i, res;
288 struct sas_internal *si =
289 to_sas_internal(task->dev->port->ha->core.shost->transportt);
290
291 if (ha->lldd_max_execute_num > 1) {
292 struct scsi_core *core = &ha->core;
293 struct sas_task *t, *n;
294
295 spin_lock_irqsave(&core->task_queue_lock, flags);
296 list_for_each_entry_safe(t, n, &core->task_queue, list) {
297 if (task == t) {
298 list_del_init(&t->list);
299 spin_unlock_irqrestore(&core->task_queue_lock,
300 flags);
301 SAS_DPRINTK("%s: task 0x%p aborted from "
302 "task_queue\n",
303 __FUNCTION__, task);
304 return TASK_IS_ABORTED;
305 }
306 }
307 spin_unlock_irqrestore(&core->task_queue_lock, flags);
308 }
309
310 for (i = 0; i < 5; i++) {
311 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
312 res = si->dft->lldd_abort_task(task);
313
314 spin_lock_irqsave(&task->task_state_lock, flags);
315 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
316 spin_unlock_irqrestore(&task->task_state_lock, flags);
317 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
318 task);
319 return TASK_IS_DONE;
320 }
321 spin_unlock_irqrestore(&task->task_state_lock, flags);
322
323 if (res == TMF_RESP_FUNC_COMPLETE) {
324 SAS_DPRINTK("%s: task 0x%p is aborted\n",
325 __FUNCTION__, task);
326 return TASK_IS_ABORTED;
327 } else if (si->dft->lldd_query_task) {
328 SAS_DPRINTK("%s: querying task 0x%p\n",
329 __FUNCTION__, task);
330 res = si->dft->lldd_query_task(task);
331 if (res == TMF_RESP_FUNC_SUCC) {
332 SAS_DPRINTK("%s: task 0x%p at LU\n",
333 __FUNCTION__, task);
334 return TASK_IS_AT_LU;
335 } else if (res == TMF_RESP_FUNC_COMPLETE) {
336 SAS_DPRINTK("%s: task 0x%p not at LU\n",
337 __FUNCTION__, task);
338 return TASK_IS_NOT_AT_LU;
339 }
340 }
341 }
342 return res;
343}
344
345static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
346{
347 int res = TMF_RESP_FUNC_FAILED;
348 struct scsi_lun lun;
349 struct sas_internal *i =
350 to_sas_internal(dev->port->ha->core.shost->transportt);
351
352 int_to_scsilun(cmd->device->lun, &lun);
353
354 SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
355 SAS_ADDR(dev->sas_addr),
356 cmd->device->lun);
357
358 if (i->dft->lldd_abort_task_set)
359 res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
360
361 if (res == TMF_RESP_FUNC_FAILED) {
362 if (i->dft->lldd_clear_task_set)
363 res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
364 }
365
366 if (res == TMF_RESP_FUNC_FAILED) {
367 if (i->dft->lldd_lu_reset)
368 res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
369 }
370
371 return res;
372}
373
374static int sas_recover_I_T(struct domain_device *dev)
375{
376 int res = TMF_RESP_FUNC_FAILED;
377 struct sas_internal *i =
378 to_sas_internal(dev->port->ha->core.shost->transportt);
379
380 SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
381 SAS_ADDR(dev->sas_addr));
382
383 if (i->dft->lldd_I_T_nexus_reset)
384 res = i->dft->lldd_I_T_nexus_reset(dev);
385
386 return res;
387}
388
389void sas_scsi_recover_host(struct Scsi_Host *shost)
390{
391 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
392 unsigned long flags;
393 LIST_HEAD(error_q);
394 struct scsi_cmnd *cmd, *n;
395 enum task_disposition res = TASK_IS_DONE;
396 int tmf_resp;
397 struct sas_internal *i = to_sas_internal(shost->transportt);
398
399 spin_lock_irqsave(shost->host_lock, flags);
400 list_splice_init(&shost->eh_cmd_q, &error_q);
401 spin_unlock_irqrestore(shost->host_lock, flags);
402
403 SAS_DPRINTK("Enter %s\n", __FUNCTION__);
404
405 /* All tasks on this list were marked SAS_TASK_STATE_ABORTED
406 * by sas_scsi_timed_out() callback.
407 */
408Again:
409 SAS_DPRINTK("going over list...\n");
410 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
411 struct sas_task *task = TO_SAS_TASK(cmd);
412
413 SAS_DPRINTK("trying to find task 0x%p\n", task);
414 list_del_init(&cmd->eh_entry);
415 res = sas_scsi_find_task(task);
416
417 cmd->eh_eflags = 0;
418 shost->host_failed--;
419
420 switch (res) {
421 case TASK_IS_DONE:
422 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
423 task);
424 task->task_done(task);
425 continue;
426 case TASK_IS_ABORTED:
427 SAS_DPRINTK("%s: task 0x%p is aborted\n",
428 __FUNCTION__, task);
429 task->task_done(task);
430 continue;
431 case TASK_IS_AT_LU:
432 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
433 tmf_resp = sas_recover_lu(task->dev, cmd);
434 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
435 SAS_DPRINTK("dev %016llx LU %x is "
436 "recovered\n",
 437					    SAS_ADDR(task->dev->sas_addr),
438 cmd->device->lun);
439 task->task_done(task);
440 sas_scsi_clear_queue_lu(&error_q, cmd);
441 goto Again;
442 }
443 /* fallthrough */
444 case TASK_IS_NOT_AT_LU:
445 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
446 task);
447 tmf_resp = sas_recover_I_T(task->dev);
448 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
449 SAS_DPRINTK("I_T %016llx recovered\n",
450 SAS_ADDR(task->dev->sas_addr));
451 task->task_done(task);
452 sas_scsi_clear_queue_I_T(&error_q, task->dev);
453 goto Again;
454 }
455 /* Hammer time :-) */
456 if (i->dft->lldd_clear_nexus_port) {
457 struct asd_sas_port *port = task->dev->port;
458 SAS_DPRINTK("clearing nexus for port:%d\n",
459 port->id);
460 res = i->dft->lldd_clear_nexus_port(port);
461 if (res == TMF_RESP_FUNC_COMPLETE) {
462 SAS_DPRINTK("clear nexus port:%d "
463 "succeeded\n", port->id);
464 task->task_done(task);
465 sas_scsi_clear_queue_port(&error_q,
466 port);
467 goto Again;
468 }
469 }
470 if (i->dft->lldd_clear_nexus_ha) {
471 SAS_DPRINTK("clear nexus ha\n");
472 res = i->dft->lldd_clear_nexus_ha(ha);
473 if (res == TMF_RESP_FUNC_COMPLETE) {
474 SAS_DPRINTK("clear nexus ha "
475 "succeeded\n");
476 task->task_done(task);
477 goto out;
478 }
479 }
480 /* If we are here -- this means that no amount
481 * of effort could recover from errors. Quite
482 * possibly the HA just disappeared.
483 */
484 SAS_DPRINTK("error from device %llx, LUN %x "
485 "couldn't be recovered in any way\n",
486 SAS_ADDR(task->dev->sas_addr),
487 cmd->device->lun);
488
489 task->task_done(task);
490 goto clear_q;
491 }
492 }
493out:
494 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
495 return;
496clear_q:
497 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
498 list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
499 struct sas_task *task = TO_SAS_TASK(cmd);
500 list_del_init(&cmd->eh_entry);
501 task->task_done(task);
502 }
503}
504
505enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
506{
507 struct sas_task *task = TO_SAS_TASK(cmd);
508 unsigned long flags;
509
510 if (!task) {
511 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
512 cmd, task);
513 return EH_HANDLED;
514 }
515
516 spin_lock_irqsave(&task->task_state_lock, flags);
517 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
518 spin_unlock_irqrestore(&task->task_state_lock, flags);
519 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
520 cmd, task);
521 return EH_HANDLED;
522 }
523 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
524 spin_unlock_irqrestore(&task->task_state_lock, flags);
525
526 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
527 cmd, task);
528
529 return EH_NOT_HANDLED;
530}
531
532struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
533{
534 struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
535 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
536 struct domain_device *found_dev = NULL;
537 int i;
538
539 spin_lock(&ha->phy_port_lock);
540 for (i = 0; i < ha->num_phys; i++) {
541 struct asd_sas_port *port = ha->sas_port[i];
542 struct domain_device *dev;
543
544 spin_lock(&port->dev_list_lock);
545 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
546 if (rphy == dev->rphy) {
547 found_dev = dev;
548 spin_unlock(&port->dev_list_lock);
549 goto found;
550 }
551 }
552 spin_unlock(&port->dev_list_lock);
553 }
554 found:
555 spin_unlock(&ha->phy_port_lock);
556
557 return found_dev;
558}
559
560static inline struct domain_device *sas_find_target(struct scsi_target *starget)
561{
562 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
563
564 return sas_find_dev_by_rphy(rphy);
565}
566
567int sas_target_alloc(struct scsi_target *starget)
568{
569 struct domain_device *found_dev = sas_find_target(starget);
570
571 if (!found_dev)
572 return -ENODEV;
573
574 starget->hostdata = found_dev;
575 return 0;
576}
577
578#define SAS_DEF_QD 32
579#define SAS_MAX_QD 64
580
581int sas_slave_configure(struct scsi_device *scsi_dev)
582{
583 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
584 struct sas_ha_struct *sas_ha;
585
586 BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
587
588 sas_ha = dev->port->ha;
589
590 sas_read_port_mode_page(scsi_dev);
591
592 if (scsi_dev->tagged_supported) {
593 scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
594 scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
595 } else {
596 SAS_DPRINTK("device %llx, LUN %x doesn't support "
597 "TCQ\n", SAS_ADDR(dev->sas_addr),
598 scsi_dev->lun);
599 scsi_dev->tagged_supported = 0;
600 scsi_set_tag_type(scsi_dev, 0);
601 scsi_deactivate_tcq(scsi_dev, 1);
602 }
603
604 return 0;
605}
606
607void sas_slave_destroy(struct scsi_device *scsi_dev)
608{
609}
610
611int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth)
612{
613 int res = min(new_depth, SAS_MAX_QD);
614
615 if (scsi_dev->tagged_supported)
616 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
617 res);
618 else {
619 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
620 sas_printk("device %llx LUN %x queue depth changed to 1\n",
621 SAS_ADDR(dev->sas_addr),
622 scsi_dev->lun);
623 scsi_adjust_queue_depth(scsi_dev, 0, 1);
624 res = 1;
625 }
626
627 return res;
628}
629
630int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
631{
632 if (!scsi_dev->tagged_supported)
633 return 0;
634
635 scsi_deactivate_tcq(scsi_dev, 1);
636
637 scsi_set_tag_type(scsi_dev, qt);
638 scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
639
640 return qt;
641}
642
643int sas_bios_param(struct scsi_device *scsi_dev,
644 struct block_device *bdev,
645 sector_t capacity, int *hsc)
646{
647 hsc[0] = 255;
648 hsc[1] = 63;
649 sector_div(capacity, 255*63);
650 hsc[2] = capacity;
651
652 return 0;
653}
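
A quick worked example of the fixed 255-head, 63-sector geometry above, using a made-up capacity: for a disk reporting 71,132,960 512-byte sectors, sas_bios_param() fills hsc = {255, 63, 71132960 / (255 * 63)} = {255, 63, 4427}, since sector_div() performs the integer division 71132960 / 16065 = 4427.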
654
655/* ---------- Task Collector Thread implementation ---------- */
656
657static void sas_queue(struct sas_ha_struct *sas_ha)
658{
659 struct scsi_core *core = &sas_ha->core;
660 unsigned long flags;
661 LIST_HEAD(q);
662 int can_queue;
663 int res;
664 struct sas_internal *i = to_sas_internal(core->shost->transportt);
665
666 spin_lock_irqsave(&core->task_queue_lock, flags);
667 while (!core->queue_thread_kill &&
668 !list_empty(&core->task_queue)) {
669
670 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
671 if (can_queue >= 0) {
672 can_queue = core->task_queue_size;
673 list_splice_init(&core->task_queue, &q);
674 } else {
675 struct list_head *a, *n;
676
677 can_queue = sas_ha->lldd_queue_size;
678 list_for_each_safe(a, n, &core->task_queue) {
679 list_move_tail(a, &q);
680 if (--can_queue == 0)
681 break;
682 }
683 can_queue = sas_ha->lldd_queue_size;
684 }
685 core->task_queue_size -= can_queue;
686 spin_unlock_irqrestore(&core->task_queue_lock, flags);
687 {
688 struct sas_task *task = list_entry(q.next,
689 struct sas_task,
690 list);
691 list_del_init(&q);
692 res = i->dft->lldd_execute_task(task, can_queue,
693 GFP_KERNEL);
694 if (unlikely(res))
695 __list_add(&q, task->list.prev, &task->list);
696 }
697 spin_lock_irqsave(&core->task_queue_lock, flags);
698 if (res) {
699 list_splice_init(&q, &core->task_queue); /*at head*/
700 core->task_queue_size += can_queue;
701 }
702 }
703 spin_unlock_irqrestore(&core->task_queue_lock, flags);
704}
705
706static DECLARE_COMPLETION(queue_th_comp);
707
708/**
709 * sas_queue_thread -- The Task Collector thread
710 * @_sas_ha: pointer to struct sas_ha
711 */
712static int sas_queue_thread(void *_sas_ha)
713{
714 struct sas_ha_struct *sas_ha = _sas_ha;
715 struct scsi_core *core = &sas_ha->core;
716
717 daemonize("sas_queue_%d", core->shost->host_no);
718 current->flags |= PF_NOFREEZE;
719
720 complete(&queue_th_comp);
721
722 while (1) {
723 down_interruptible(&core->queue_thread_sema);
724 sas_queue(sas_ha);
725 if (core->queue_thread_kill)
726 break;
727 }
728
729 complete(&queue_th_comp);
730
731 return 0;
732}
733
734int sas_init_queue(struct sas_ha_struct *sas_ha)
735{
736 int res;
737 struct scsi_core *core = &sas_ha->core;
738
739 spin_lock_init(&core->task_queue_lock);
740 core->task_queue_size = 0;
741 INIT_LIST_HEAD(&core->task_queue);
742 init_MUTEX_LOCKED(&core->queue_thread_sema);
743
744 res = kernel_thread(sas_queue_thread, sas_ha, 0);
745 if (res >= 0)
746 wait_for_completion(&queue_th_comp);
747
748 return res < 0 ? res : 0;
749}
750
751void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
752{
753 unsigned long flags;
754 struct scsi_core *core = &sas_ha->core;
755 struct sas_task *task, *n;
756
757 init_completion(&queue_th_comp);
758 core->queue_thread_kill = 1;
759 up(&core->queue_thread_sema);
760 wait_for_completion(&queue_th_comp);
761
762 if (!list_empty(&core->task_queue))
763 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
764 SAS_ADDR(sas_ha->sas_addr));
765
766 spin_lock_irqsave(&core->task_queue_lock, flags);
767 list_for_each_entry_safe(task, n, &core->task_queue, list) {
768 struct scsi_cmnd *cmd = task->uldd_task;
769
770 list_del_init(&task->list);
771
772 ASSIGN_SAS_TASK(cmd, NULL);
773 sas_free_task(task);
774 cmd->result = DID_ABORT << 16;
775 cmd->scsi_done(cmd);
776 }
777 spin_unlock_irqrestore(&core->task_queue_lock, flags);
778}
779
780EXPORT_SYMBOL_GPL(sas_queuecommand);
781EXPORT_SYMBOL_GPL(sas_target_alloc);
782EXPORT_SYMBOL_GPL(sas_slave_configure);
783EXPORT_SYMBOL_GPL(sas_slave_destroy);
784EXPORT_SYMBOL_GPL(sas_change_queue_depth);
785EXPORT_SYMBOL_GPL(sas_change_queue_type);
786EXPORT_SYMBOL_GPL(sas_bios_param);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index d44f9aac6b8f..3f7f5f8abd75 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -285,6 +285,7 @@ struct lpfc_hba {
285 uint32_t cfg_log_verbose; 285 uint32_t cfg_log_verbose;
286 uint32_t cfg_lun_queue_depth; 286 uint32_t cfg_lun_queue_depth;
287 uint32_t cfg_nodev_tmo; 287 uint32_t cfg_nodev_tmo;
288 uint32_t cfg_devloss_tmo;
288 uint32_t cfg_hba_queue_depth; 289 uint32_t cfg_hba_queue_depth;
289 uint32_t cfg_fcp_class; 290 uint32_t cfg_fcp_class;
290 uint32_t cfg_use_adisc; 291 uint32_t cfg_use_adisc;
@@ -302,6 +303,9 @@ struct lpfc_hba {
302 uint32_t cfg_poll_tmo; 303 uint32_t cfg_poll_tmo;
303 uint32_t cfg_sg_seg_cnt; 304 uint32_t cfg_sg_seg_cnt;
304 uint32_t cfg_sg_dma_buf_size; 305 uint32_t cfg_sg_dma_buf_size;
306 uint64_t cfg_soft_wwpn;
307
308 uint32_t dev_loss_tmo_changed;
305 309
306 lpfc_vpd_t vpd; /* vital product data */ 310 lpfc_vpd_t vpd; /* vital product data */
307 311
@@ -351,6 +355,8 @@ struct lpfc_hba {
351#define VPD_PORT 0x8 /* valid vpd port data */ 355#define VPD_PORT 0x8 /* valid vpd port data */
352#define VPD_MASK 0xf /* mask for any vpd data */ 356#define VPD_MASK 0xf /* mask for any vpd data */
353 357
358 uint8_t soft_wwpn_enable;
359
354 struct timer_list fcp_poll_timer; 360 struct timer_list fcp_poll_timer;
355 struct timer_list els_tmofunc; 361 struct timer_list els_tmofunc;
356 362
@@ -391,3 +397,5 @@ struct rnidrsp {
391 struct list_head list; 397 struct list_head list;
392 uint32_t data; 398 uint32_t data;
393}; 399};
400
401#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d384c16f4a87..9496e87c135e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,9 @@
39#include "lpfc_compat.h" 39#include "lpfc_compat.h"
40#include "lpfc_crtn.h" 40#include "lpfc_crtn.h"
41 41
42#define LPFC_DEF_DEVLOSS_TMO 30
43#define LPFC_MIN_DEVLOSS_TMO 1
44#define LPFC_MAX_DEVLOSS_TMO 255
42 45
43static void 46static void
44lpfc_jedec_to_ascii(int incr, char hdw[]) 47lpfc_jedec_to_ascii(int incr, char hdw[])
@@ -548,6 +551,119 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
548 lpfc_board_mode_show, lpfc_board_mode_store); 551 lpfc_board_mode_show, lpfc_board_mode_store);
549static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 552static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
550 553
554
555static char *lpfc_soft_wwpn_key = "C99G71SL8032A";
556
557static ssize_t
558lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
559 size_t count)
560{
561 struct Scsi_Host *host = class_to_shost(cdev);
562 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
563 unsigned int cnt = count;
564
565 /*
566 * We're doing a simple sanity check for soft_wwpn setting.
 567	 * We require that the user write a specific key to make the
 568	 * soft_wwpn attribute settable. Once the attribute is
 569	 * written, the enable key resets. If further updates are
 570	 * desired, the key must be written again to re-enable the
 571	 * attribute.
572 *
573 * The "key" is not secret - it is a hardcoded string shown
574 * here. The intent is to protect against the random user or
575 * application that is just writing attributes.
576 */
577
578 /* count may include a LF at end of string */
579 if (buf[cnt-1] == '\n')
580 cnt--;
581
582 if ((cnt != strlen(lpfc_soft_wwpn_key)) ||
583 (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0))
584 return -EINVAL;
585
586 phba->soft_wwpn_enable = 1;
587 return count;
588}
589static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL,
590 lpfc_soft_wwpn_enable_store);
591
592static ssize_t
593lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
594{
595 struct Scsi_Host *host = class_to_shost(cdev);
596 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
597 return snprintf(buf, PAGE_SIZE, "0x%llx\n", phba->cfg_soft_wwpn);
598}
599
600
601static ssize_t
602lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
603{
604 struct Scsi_Host *host = class_to_shost(cdev);
605 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
606 struct completion online_compl;
607 int stat1=0, stat2=0;
608 unsigned int i, j, cnt=count;
609 u8 wwpn[8];
610
611 /* count may include a LF at end of string */
612 if (buf[cnt-1] == '\n')
613 cnt--;
614
615 if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) ||
616 ((cnt == 17) && (*buf++ != 'x')) ||
617 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
618 return -EINVAL;
619
620 phba->soft_wwpn_enable = 0;
621
622 memset(wwpn, 0, sizeof(wwpn));
623
624 /* Validate and store the new name */
625 for (i=0, j=0; i < 16; i++) {
626 if ((*buf >= 'a') && (*buf <= 'f'))
627 j = ((j << 4) | ((*buf++ -'a') + 10));
628 else if ((*buf >= 'A') && (*buf <= 'F'))
629 j = ((j << 4) | ((*buf++ -'A') + 10));
630 else if ((*buf >= '0') && (*buf <= '9'))
631 j = ((j << 4) | (*buf++ -'0'));
632 else
633 return -EINVAL;
634 if (i % 2) {
635 wwpn[i/2] = j & 0xff;
636 j = 0;
637 }
638 }
639 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
640 fc_host_port_name(host) = phba->cfg_soft_wwpn;
641
642 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
643 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
644
645 init_completion(&online_compl);
646 lpfc_workq_post_event(phba, &stat1, &online_compl, LPFC_EVT_OFFLINE);
647 wait_for_completion(&online_compl);
648 if (stat1)
649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
650 "%d:0463 lpfc_soft_wwpn attribute set failed to reinit "
651 "adapter - %d\n", phba->brd_no, stat1);
652
653 init_completion(&online_compl);
654 lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
655 wait_for_completion(&online_compl);
656 if (stat2)
657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
658 "%d:0464 lpfc_soft_wwpn attribute set failed to reinit "
659 "adapter - %d\n", phba->brd_no, stat2);
660
661 return (stat1 || stat2) ? -EIO : count;
662}
663static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
664 lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
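
For completeness, a hypothetical userspace sketch of the two-step flow implemented above (not part of this patch): the sysfs path, host number, and WWPN value are assumptions; the key string is the one defined in lpfc_soft_wwpn_key.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write a string to a sysfs attribute; returns 0 on success. */
static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Step 1: unlock the attribute by writing the enable key. */
	if (write_attr("/sys/class/scsi_host/host0/lpfc_soft_wwpn_enable",
		       "C99G71SL8032A\n"))
		return 1;

	/* Step 2: write the new WWPN; the driver then takes the port
	 * offline and back online so the new name takes effect. */
	return write_attr("/sys/class/scsi_host/host0/lpfc_soft_wwpn",
			  "0x10000000c9abcdef\n") ? 1 : 0;
}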
665
666
551static int lpfc_poll = 0; 667static int lpfc_poll = 0;
552module_param(lpfc_poll, int, 0); 668module_param(lpfc_poll, int, 0);
553MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" 669MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
@@ -559,6 +675,123 @@ static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
559 lpfc_poll_show, lpfc_poll_store); 675 lpfc_poll_show, lpfc_poll_store);
560 676
561/* 677/*
678# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
679# until the timer expires. Value range is [0,255]. Default value is 30.
680*/
681static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
682static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
683module_param(lpfc_nodev_tmo, int, 0);
684MODULE_PARM_DESC(lpfc_nodev_tmo,
685 "Seconds driver will hold I/O waiting "
686 "for a device to come back");
687static ssize_t
688lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
689{
690 struct Scsi_Host *host = class_to_shost(cdev);
691 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
692 int val = 0;
693 val = phba->cfg_devloss_tmo;
694 return snprintf(buf, PAGE_SIZE, "%d\n",
695 phba->cfg_devloss_tmo);
696}
697
698static int
699lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
700{
701 static int warned;
702 if (phba->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
703 phba->cfg_nodev_tmo = phba->cfg_devloss_tmo;
704 if (!warned && val != LPFC_DEF_DEVLOSS_TMO) {
705 warned = 1;
706 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
707 "%d:0402 Ignoring nodev_tmo module "
708 "parameter because devloss_tmo is"
709 " set.\n",
710 phba->brd_no);
711 }
712 return 0;
713 }
714
715 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
716 phba->cfg_nodev_tmo = val;
717 phba->cfg_devloss_tmo = val;
718 return 0;
719 }
720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
721 "%d:0400 lpfc_nodev_tmo attribute cannot be set to %d, "
722 "allowed range is [%d, %d]\n",
723 phba->brd_no, val,
724 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
725 phba->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
726 return -EINVAL;
727}
728
729static int
730lpfc_nodev_tmo_set(struct lpfc_hba *phba, int val)
731{
732 if (phba->dev_loss_tmo_changed ||
733 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
735 "%d:0401 Ignoring change to nodev_tmo "
736 "because devloss_tmo is set.\n",
737 phba->brd_no);
738 return 0;
739 }
740
741 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
742 phba->cfg_nodev_tmo = val;
743 phba->cfg_devloss_tmo = val;
744 return 0;
745 }
746
747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
748 "%d:0403 lpfc_nodev_tmo attribute cannot be set to %d, "
749 "allowed range is [%d, %d]\n",
750 phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
751 LPFC_MAX_DEVLOSS_TMO);
752 return -EINVAL;
753}
754
755lpfc_param_store(nodev_tmo)
756
757static CLASS_DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
758 lpfc_nodev_tmo_show, lpfc_nodev_tmo_store);
759
760/*
761# lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
762# disappear until the timer expires. Value range is [0,255]. Default
763# value is 30.
764*/
765module_param(lpfc_devloss_tmo, int, 0);
766MODULE_PARM_DESC(lpfc_devloss_tmo,
767 "Seconds driver will hold I/O waiting "
768 "for a device to come back");
769lpfc_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
770 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
771lpfc_param_show(devloss_tmo)
772static int
773lpfc_devloss_tmo_set(struct lpfc_hba *phba, int val)
774{
775 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
776 phba->cfg_nodev_tmo = val;
777 phba->cfg_devloss_tmo = val;
778 phba->dev_loss_tmo_changed = 1;
779 return 0;
780 }
781
782 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
783 "%d:0404 lpfc_devloss_tmo attribute cannot be set to"
784 " %d, allowed range is [%d, %d]\n",
785 phba->brd_no, val, LPFC_MIN_DEVLOSS_TMO,
786 LPFC_MAX_DEVLOSS_TMO);
787 return -EINVAL;
788}
789
790lpfc_param_store(devloss_tmo)
791static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
792 lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
793
794/*
562# lpfc_log_verbose: Only turn this flag on if you are willing to risk being 795# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
563# deluged with LOTS of information. 796# deluged with LOTS of information.
564# You can set a bit mask to record specific types of verbose messages: 797# You can set a bit mask to record specific types of verbose messages:
@@ -617,14 +850,6 @@ LPFC_ATTR_R(scan_down, 1, 0, 1,
617 "Start scanning for devices from highest ALPA to lowest"); 850 "Start scanning for devices from highest ALPA to lowest");
618 851
619/* 852/*
620# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
621# until the timer expires. Value range is [0,255]. Default value is 30.
622# NOTE: this MUST be less then the SCSI Layer command timeout - 1.
623*/
624LPFC_ATTR_RW(nodev_tmo, 30, 0, 255,
625 "Seconds driver will hold I/O waiting for a device to come back");
626
627/*
628# lpfc_topology: link topology for init link 853# lpfc_topology: link topology for init link
629# 0x0 = attempt loop mode then point-to-point 854# 0x0 = attempt loop mode then point-to-point
630# 0x01 = internal loopback mode 855# 0x01 = internal loopback mode
@@ -720,6 +945,7 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
720LPFC_ATTR_RW(poll_tmo, 10, 1, 255, 945LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
721 "Milliseconds driver will wait between polling FCP ring"); 946 "Milliseconds driver will wait between polling FCP ring");
722 947
948
723struct class_device_attribute *lpfc_host_attrs[] = { 949struct class_device_attribute *lpfc_host_attrs[] = {
724 &class_device_attr_info, 950 &class_device_attr_info,
725 &class_device_attr_serialnum, 951 &class_device_attr_serialnum,
@@ -737,6 +963,7 @@ struct class_device_attribute *lpfc_host_attrs[] = {
737 &class_device_attr_lpfc_lun_queue_depth, 963 &class_device_attr_lpfc_lun_queue_depth,
738 &class_device_attr_lpfc_hba_queue_depth, 964 &class_device_attr_lpfc_hba_queue_depth,
739 &class_device_attr_lpfc_nodev_tmo, 965 &class_device_attr_lpfc_nodev_tmo,
966 &class_device_attr_lpfc_devloss_tmo,
740 &class_device_attr_lpfc_fcp_class, 967 &class_device_attr_lpfc_fcp_class,
741 &class_device_attr_lpfc_use_adisc, 968 &class_device_attr_lpfc_use_adisc,
742 &class_device_attr_lpfc_ack0, 969 &class_device_attr_lpfc_ack0,
@@ -754,6 +981,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
754 &class_device_attr_issue_reset, 981 &class_device_attr_issue_reset,
755 &class_device_attr_lpfc_poll, 982 &class_device_attr_lpfc_poll,
756 &class_device_attr_lpfc_poll_tmo, 983 &class_device_attr_lpfc_poll_tmo,
984 &class_device_attr_lpfc_soft_wwpn,
985 &class_device_attr_lpfc_soft_wwpn_enable,
757 NULL, 986 NULL,
758}; 987};
759 988
@@ -1204,6 +1433,15 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
1204 fc_host_fabric_name(shost) = node_name; 1433 fc_host_fabric_name(shost) = node_name;
1205} 1434}
1206 1435
1436static void
1437lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
1438{
1439 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
1440
1441 spin_lock_irq(shost->host_lock);
1442 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
1443 spin_unlock_irq(shost->host_lock);
1444}
1207 1445
1208static struct fc_host_statistics * 1446static struct fc_host_statistics *
1209lpfc_get_stats(struct Scsi_Host *shost) 1447lpfc_get_stats(struct Scsi_Host *shost)
@@ -1441,27 +1679,12 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
1441} 1679}
1442 1680
1443static void 1681static void
1444lpfc_get_rport_loss_tmo(struct fc_rport *rport)
1445{
1446 /*
1447 * Return the driver's global value for device loss timeout plus
1448 * five seconds to allow the driver's nodev timer to run.
1449 */
1450 rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
1451}
1452
1453static void
1454lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 1682lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1455{ 1683{
1456 /*
1457 * The driver doesn't have a per-target timeout setting. Set
1458 * this value globally. lpfc_nodev_tmo should be greater then 0.
1459 */
1460 if (timeout) 1684 if (timeout)
1461 lpfc_nodev_tmo = timeout; 1685 rport->dev_loss_tmo = timeout;
1462 else 1686 else
1463 lpfc_nodev_tmo = 1; 1687 rport->dev_loss_tmo = 1;
1464 rport->dev_loss_tmo = lpfc_nodev_tmo + 5;
1465} 1688}
1466 1689
1467 1690
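
The hunk above drops the old behaviour (write the global lpfc_nodev_tmo, then publish it plus five seconds) in favour of a straight per-rport assignment, clamping zero to one so the transport timer is never disabled. A tiny sketch of the new semantics; struct and field names are simplified placeholders.

#include <stdio.h>
#include <stdint.h>

struct rport { uint32_t dev_loss_tmo; };

/* per-remote-port setter, as in the new lpfc_set_rport_loss_tmo() */
static void set_rport_loss_tmo(struct rport *rp, uint32_t timeout)
{
    /* 0 would mean "never fire"; keep at least one second */
    rp->dev_loss_tmo = timeout ? timeout : 1;
}

int main(void)
{
    struct rport rp = { 0 };

    set_rport_loss_tmo(&rp, 45);
    printf("%u\n", rp.dev_loss_tmo);  /* 45 */
    set_rport_loss_tmo(&rp, 0);
    printf("%u\n", rp.dev_loss_tmo);  /* 1  */
    return 0;
}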
@@ -1486,7 +1709,6 @@ struct fc_function_template lpfc_transport_functions = {
1486 .show_host_port_name = 1, 1709 .show_host_port_name = 1,
1487 .show_host_supported_classes = 1, 1710 .show_host_supported_classes = 1,
1488 .show_host_supported_fc4s = 1, 1711 .show_host_supported_fc4s = 1,
1489 .show_host_symbolic_name = 1,
1490 .show_host_supported_speeds = 1, 1712 .show_host_supported_speeds = 1,
1491 .show_host_maxframe_size = 1, 1713 .show_host_maxframe_size = 1,
1492 1714
@@ -1509,6 +1731,9 @@ struct fc_function_template lpfc_transport_functions = {
1509 .get_host_fabric_name = lpfc_get_host_fabric_name, 1731 .get_host_fabric_name = lpfc_get_host_fabric_name,
1510 .show_host_fabric_name = 1, 1732 .show_host_fabric_name = 1,
1511 1733
1734 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
1735 .show_host_symbolic_name = 1,
1736
1512 /* 1737 /*
1513 * The LPFC driver treats linkdown handling as target loss events 1738 * The LPFC driver treats linkdown handling as target loss events
1514 * so there are no sysfs handlers for link_down_tmo. 1739 * so there are no sysfs handlers for link_down_tmo.
@@ -1521,7 +1746,6 @@ struct fc_function_template lpfc_transport_functions = {
1521 .show_rport_maxframe_size = 1, 1746 .show_rport_maxframe_size = 1,
1522 .show_rport_supported_classes = 1, 1747 .show_rport_supported_classes = 1,
1523 1748
1524 .get_rport_dev_loss_tmo = lpfc_get_rport_loss_tmo,
1525 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo, 1749 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
1526 .show_rport_dev_loss_tmo = 1, 1750 .show_rport_dev_loss_tmo = 1,
1527 1751
@@ -1535,6 +1759,8 @@ struct fc_function_template lpfc_transport_functions = {
1535 .show_starget_port_name = 1, 1759 .show_starget_port_name = 1,
1536 1760
1537 .issue_fc_host_lip = lpfc_issue_lip, 1761 .issue_fc_host_lip = lpfc_issue_lip,
1762 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
1763 .terminate_rport_io = lpfc_terminate_rport_io,
1538}; 1764};
1539 1765
1540void 1766void
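
The template changes above hand the FC transport two extra hooks: dev_loss_tmo_callbk fires when a remote port's loss timer expires, and terminate_rport_io flushes outstanding I/O for that port. A self-contained sketch of the callback-table idea; the struct below is a local illustration, not the scsi_transport_fc definition.

#include <stdio.h>

struct rport { unsigned int port_id; };

/* a cut-down callback table in the spirit of fc_function_template */
struct transport_ops {
    void (*dev_loss_tmo_callbk)(struct rport *);
    void (*terminate_rport_io)(struct rport *);
};

static void my_dev_loss_cb(struct rport *rp)
{
    printf("devloss expired on port 0x%x: tear down the node\n", rp->port_id);
}

static void my_terminate_io(struct rport *rp)
{
    printf("aborting outstanding I/O on port 0x%x\n", rp->port_id);
}

static const struct transport_ops ops = {
    .dev_loss_tmo_callbk = my_dev_loss_cb,
    .terminate_rport_io  = my_terminate_io,
};

int main(void)
{
    struct rport rp = { .port_id = 0x010203 };

    /* the transport layer would invoke these at the appropriate times */
    ops.terminate_rport_io(&rp);
    ops.dev_loss_tmo_callbk(&rp);
    return 0;
}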
@@ -1550,14 +1776,15 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1550 lpfc_ack0_init(phba, lpfc_ack0); 1776 lpfc_ack0_init(phba, lpfc_ack0);
1551 lpfc_topology_init(phba, lpfc_topology); 1777 lpfc_topology_init(phba, lpfc_topology);
1552 lpfc_scan_down_init(phba, lpfc_scan_down); 1778 lpfc_scan_down_init(phba, lpfc_scan_down);
1553 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
1554 lpfc_link_speed_init(phba, lpfc_link_speed); 1779 lpfc_link_speed_init(phba, lpfc_link_speed);
1555 lpfc_fdmi_on_init(phba, lpfc_fdmi_on); 1780 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
1556 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 1781 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1557 lpfc_max_luns_init(phba, lpfc_max_luns); 1782 lpfc_max_luns_init(phba, lpfc_max_luns);
1558 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 1783 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
1559 1784 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1785 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
1560 phba->cfg_poll = lpfc_poll; 1786 phba->cfg_poll = lpfc_poll;
1787 phba->cfg_soft_wwpn = 0L;
1561 1788
1562 /* 1789 /*
1563 * The total number of segments is the configuration value plus 2 1790 * The total number of segments is the configuration value plus 2
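
Note the ordering in lpfc_get_cfgparam(): lpfc_devloss_tmo_init() now runs before lpfc_nodev_tmo_init(), because the nodev path consults cfg_devloss_tmo to decide whether the legacy module parameter should still be honoured. A compact sketch of why the order matters; names and the default of 30 are placeholders.

#include <stdio.h>

#define DEF_TMO 30

struct cfg { int nodev_tmo, devloss_tmo; };

static void devloss_init(struct cfg *c, int param) { c->devloss_tmo = param; }

/* honours the legacy parameter only if devloss_tmo is still at its default */
static void nodev_init(struct cfg *c, int param)
{
    if (c->devloss_tmo != DEF_TMO)
        c->nodev_tmo = c->devloss_tmo;    /* devloss wins */
    else
        c->nodev_tmo = c->devloss_tmo = param;
}

int main(void)
{
    struct cfg c = { DEF_TMO, DEF_TMO };

    devloss_init(&c, 60);  /* must run first so nodev_init can see the value */
    nodev_init(&c, 10);
    printf("nodev=%d devloss=%d\n", c.nodev_tmo, c.devloss_tmo); /* 60 60 */
    return 0;
}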
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 2a176467f71b..3d684496acde 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -18,6 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21struct fc_rport;
21void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 22void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
22void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 23void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
23int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 24int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
@@ -200,6 +201,8 @@ extern struct scsi_host_template lpfc_template;
200extern struct fc_function_template lpfc_transport_functions; 201extern struct fc_function_template lpfc_transport_functions;
201 202
202void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp); 203void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp);
204void lpfc_terminate_rport_io(struct fc_rport *);
205void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
203 206
204#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 207#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
205#define HBA_EVENT_RSCN 5 208#define HBA_EVENT_RSCN 5
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index bbb7310210b0..ae4106458991 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -324,7 +324,6 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
324 struct lpfc_sli_ct_request *Response = 324 struct lpfc_sli_ct_request *Response =
325 (struct lpfc_sli_ct_request *) mp->virt; 325 (struct lpfc_sli_ct_request *) mp->virt;
326 struct lpfc_nodelist *ndlp = NULL; 326 struct lpfc_nodelist *ndlp = NULL;
327 struct lpfc_nodelist *next_ndlp;
328 struct lpfc_dmabuf *mlast, *next_mp; 327 struct lpfc_dmabuf *mlast, *next_mp;
329 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; 328 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
330 uint32_t Did; 329 uint32_t Did;
@@ -399,30 +398,6 @@ nsout1:
399 * current driver state. 398 * current driver state.
400 */ 399 */
401 if (phba->hba_state == LPFC_HBA_READY) { 400 if (phba->hba_state == LPFC_HBA_READY) {
402
403 /*
404 * Switch ports that connect a loop of multiple targets need
405 * special consideration. The driver wants to unregister the
406 * rpi only on the target that was pulled from the loop. On
407 * RSCN, the driver wants to rediscover an NPort only if the
408 * driver flagged it as NLP_NPR_2B_DISC. Provided adisc is
409 * not enabled and the NPort is not capable of retransmissions
410 * (FC Tape) prevent timing races with the scsi error handler by
411 * unregistering the Nport's RPI. This action causes all
412 * outstanding IO to flush back to the midlayer.
413 */
414 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
415 nlp_listp) {
416 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
417 (lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
418 if ((phba->cfg_use_adisc == 0) &&
419 !(ndlp->nlp_fcp_info &
420 NLP_FCP_2_DEVICE)) {
421 lpfc_unreg_rpi(phba, ndlp);
422 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
423 }
424 }
425 }
426 lpfc_els_flush_rscn(phba); 401 lpfc_els_flush_rscn(phba);
427 spin_lock_irq(phba->host->host_lock); 402 spin_lock_irq(phba->host->host_lock);
428 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */ 403 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 41cf5d3ea6ce..9766f909c9c6 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -30,7 +30,6 @@
30 30
31/* worker thread events */ 31/* worker thread events */
32enum lpfc_work_type { 32enum lpfc_work_type {
33 LPFC_EVT_NODEV_TMO,
34 LPFC_EVT_ONLINE, 33 LPFC_EVT_ONLINE,
35 LPFC_EVT_OFFLINE, 34 LPFC_EVT_OFFLINE,
36 LPFC_EVT_WARM_START, 35 LPFC_EVT_WARM_START,
@@ -74,11 +73,9 @@ struct lpfc_nodelist {
74#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 73#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
75 74
76 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 75 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
77 struct timer_list nlp_tmofunc; /* Used for nodev tmo */
78 struct fc_rport *rport; /* Corresponding FC transport 76 struct fc_rport *rport; /* Corresponding FC transport
79 port structure */ 77 port structure */
80 struct lpfc_hba *nlp_phba; 78 struct lpfc_hba *nlp_phba;
81 struct lpfc_work_evt nodev_timeout_evt;
82 struct lpfc_work_evt els_retry_evt; 79 struct lpfc_work_evt els_retry_evt;
83 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
84 unsigned long last_q_full_time; /* jiffy of last queue full */ 81 unsigned long last_q_full_time; /* jiffy of last queue full */
@@ -102,7 +99,6 @@ struct lpfc_nodelist {
102#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ 99#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */
103#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ 100#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */
104#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ 101#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */
105#define NLP_NODEV_TMO 0x10000 /* nodev timeout is running for node */
106#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ 102#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */
107#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ 103#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */
108#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ 104#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */
@@ -169,7 +165,7 @@ struct lpfc_nodelist {
169 */ 165 */
170/* 166/*
171 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped 167 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
172 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers 168 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
173 * expire, all effected nodes will receive a DEVICE_RM event. 169 * expire, all effected nodes will receive a DEVICE_RM event.
174 */ 170 */
175/* 171/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3567de613162..71864cdc6c71 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2506,6 +2506,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2506 uint32_t *lp; 2506 uint32_t *lp;
2507 IOCB_t *icmd; 2507 IOCB_t *icmd;
2508 uint32_t payload_len, cmd; 2508 uint32_t payload_len, cmd;
2509 int i;
2509 2510
2510 icmd = &cmdiocb->iocb; 2511 icmd = &cmdiocb->iocb;
2511 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 2512 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2524,6 +2525,10 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2524 phba->brd_no, 2525 phba->brd_no,
2525 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt); 2526 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2526 2527
2528 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2529 fc_host_post_event(phba->host, fc_get_event_number(),
2530 FCH_EVT_RSCN, lp[i]);
2531
2527 /* If we are about to begin discovery, just ACC the RSCN. 2532 /* If we are about to begin discovery, just ACC the RSCN.
2528 * Discovery processing will satisfy it. 2533 * Discovery processing will satisfy it.
2529 */ 2534 */
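
The new loop above posts one FC host event per entry in the received RSCN payload. Mechanically it just walks the payload as an array of 32-bit words; a standalone sketch of that walk, with the event-posting call replaced by a printf and a fabricated payload.

#include <stdio.h>
#include <stdint.h>

/* stand-in for fc_host_post_event(shost, event_number, FCH_EVT_RSCN, word) */
static void post_rscn_event(uint32_t word)
{
    printf("RSCN event, affected-address word: 0x%08x\n", (unsigned int)word);
}

int main(void)
{
    /* fabricated RSCN payload: a few 4-byte affected-port entries */
    uint32_t payload[] = { 0x00010200, 0x00010300, 0x00fffc01 };
    size_t payload_len = sizeof(payload);   /* length in bytes, as in the driver */
    size_t i;

    for (i = 0; i < payload_len / sizeof(uint32_t); i++)
        post_rscn_event(payload[i]);
    return 0;
}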
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index b2f1552f1848..d586c3d3b0d0 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -56,28 +56,63 @@ static uint8_t lpfcAlpaArray[] = {
56 56
57static void lpfc_disc_timeout_handler(struct lpfc_hba *); 57static void lpfc_disc_timeout_handler(struct lpfc_hba *);
58 58
59static void 59void
60lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 60lpfc_terminate_rport_io(struct fc_rport *rport)
61{ 61{
62 uint8_t *name = (uint8_t *)&ndlp->nlp_portname; 62 struct lpfc_rport_data *rdata;
63 int warn_on = 0; 63 struct lpfc_nodelist * ndlp;
64 struct lpfc_hba *phba;
64 65
65 spin_lock_irq(phba->host->host_lock); 66 rdata = rport->dd_data;
66 if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) { 67 ndlp = rdata->pnode;
67 spin_unlock_irq(phba->host->host_lock); 68
69 if (!ndlp) {
70 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
71 printk(KERN_ERR "Cannot find remote node"
72 " to terminate I/O Data x%x\n",
73 rport->port_id);
68 return; 74 return;
69 } 75 }
70 76
71 /* 77 phba = ndlp->nlp_phba;
72 * If a discovery event readded nodev_timer after timer 78
73 * firing and before processing the timer, cancel the
74 * nlp_tmofunc.
75 */
76 spin_unlock_irq(phba->host->host_lock);
77 del_timer_sync(&ndlp->nlp_tmofunc);
78 spin_lock_irq(phba->host->host_lock); 79 spin_lock_irq(phba->host->host_lock);
80 if (ndlp->nlp_sid != NLP_NO_SID) {
81 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
82 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
83 }
84 spin_unlock_irq(phba->host->host_lock);
79 85
80 ndlp->nlp_flag &= ~NLP_NODEV_TMO; 86 return;
87}
88
89/*
 90 * This function will be called when dev_loss_tmo fires.
91 */
92void
93lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
94{
95 struct lpfc_rport_data *rdata;
96 struct lpfc_nodelist * ndlp;
97 uint8_t *name;
98 int warn_on = 0;
99 struct lpfc_hba *phba;
100
101 rdata = rport->dd_data;
102 ndlp = rdata->pnode;
103
104 if (!ndlp) {
105 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
106 printk(KERN_ERR "Cannot find remote node"
107 " for rport in dev_loss_tmo_callbk x%x\n",
108 rport->port_id);
109 return;
110 }
111
112 name = (uint8_t *)&ndlp->nlp_portname;
113 phba = ndlp->nlp_phba;
114
115 spin_lock_irq(phba->host->host_lock);
81 116
82 if (ndlp->nlp_sid != NLP_NO_SID) { 117 if (ndlp->nlp_sid != NLP_NO_SID) {
83 warn_on = 1; 118 warn_on = 1;
@@ -85,11 +120,14 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
85 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 120 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
86 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 121 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
87 } 122 }
123 if (phba->fc_flag & FC_UNLOADING)
124 warn_on = 0;
125
88 spin_unlock_irq(phba->host->host_lock); 126 spin_unlock_irq(phba->host->host_lock);
89 127
90 if (warn_on) { 128 if (warn_on) {
91 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 129 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92 "%d:0203 Nodev timeout on " 130 "%d:0203 Devloss timeout on "
93 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 131 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
94 "NPort x%x Data: x%x x%x x%x\n", 132 "NPort x%x Data: x%x x%x x%x\n",
95 phba->brd_no, 133 phba->brd_no,
@@ -99,7 +137,7 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
99 ndlp->nlp_state, ndlp->nlp_rpi); 137 ndlp->nlp_state, ndlp->nlp_rpi);
100 } else { 138 } else {
101 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 139 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
102 "%d:0204 Nodev timeout on " 140 "%d:0204 Devloss timeout on "
103 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 141 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
104 "NPort x%x Data: x%x x%x x%x\n", 142 "NPort x%x Data: x%x x%x x%x\n",
105 phba->brd_no, 143 phba->brd_no,
@@ -109,7 +147,12 @@ lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
109 ndlp->nlp_state, ndlp->nlp_rpi); 147 ndlp->nlp_state, ndlp->nlp_rpi);
110 } 148 }
111 149
112 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); 150 ndlp->rport = NULL;
151 rdata->pnode = NULL;
152
153 if (!(phba->fc_flag & FC_UNLOADING))
154 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
155
113 return; 156 return;
114} 157}
115 158
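
Taken together, the two hunks above give the new transport callback this shape: look up the node behind the rport, abort any outstanding FCP I/O for its SCSI id, log at ERR or INFO depending on whether I/O was in flight, break the rport/node link, and run the DEVICE_RM state-machine event only when the driver is not unloading. A condensed flow in plain C; all types and helpers are simplified stand-ins for the lpfc structures.

#include <stdio.h>
#include <stddef.h>

#define NO_SID    (-1)
#define UNLOADING 0x1

struct node  { int sid; };
struct rport { struct node *pnode; unsigned int port_id; };
struct hba   { unsigned int fc_flag; };

static void abort_fcp_io(struct node *n) { printf("abort I/O, sid=%d\n", n->sid); }
static void device_rm(struct node *n)    { (void)n; printf("run DEVICE_RM state machine\n"); }

static void dev_loss_tmo_callbk(struct hba *hba, struct rport *rp)
{
    struct node *ndlp = rp->pnode;
    int warn = 0;

    if (!ndlp) {
        printf("no node behind rport 0x%x, nothing to do\n", rp->port_id);
        return;
    }
    if (ndlp->sid != NO_SID) {          /* target had a SCSI id: I/O may be queued */
        warn = 1;
        abort_fcp_io(ndlp);
    }
    if (hba->fc_flag & UNLOADING)       /* unload path: keep the log quiet */
        warn = 0;

    printf("%s: devloss timeout on port 0x%x\n", warn ? "ERR" : "INFO", rp->port_id);

    rp->pnode = NULL;                   /* break the rport <-> node link */
    if (!(hba->fc_flag & UNLOADING))
        device_rm(ndlp);
}

int main(void)
{
    struct node n = { .sid = 3 };
    struct rport rp = { .pnode = &n, .port_id = 0x010400 };
    struct hba hba = { .fc_flag = 0 };

    dev_loss_tmo_callbk(&hba, &rp);
    return 0;
}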
@@ -127,11 +170,6 @@ lpfc_work_list_done(struct lpfc_hba * phba)
127 spin_unlock_irq(phba->host->host_lock); 170 spin_unlock_irq(phba->host->host_lock);
128 free_evt = 1; 171 free_evt = 1;
129 switch (evtp->evt) { 172 switch (evtp->evt) {
130 case LPFC_EVT_NODEV_TMO:
131 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
132 lpfc_process_nodev_timeout(phba, ndlp);
133 free_evt = 0;
134 break;
135 case LPFC_EVT_ELS_RETRY: 173 case LPFC_EVT_ELS_RETRY:
136 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 174 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
137 lpfc_els_retry_delay_handler(ndlp); 175 lpfc_els_retry_delay_handler(ndlp);
@@ -340,6 +378,9 @@ lpfc_linkdown(struct lpfc_hba * phba)
340 spin_unlock_irq(phba->host->host_lock); 378 spin_unlock_irq(phba->host->host_lock);
341 } 379 }
342 380
381 fc_host_post_event(phba->host, fc_get_event_number(),
382 FCH_EVT_LINKDOWN, 0);
383
343 /* Clean up any firmware default rpi's */ 384 /* Clean up any firmware default rpi's */
344 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 385 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
345 lpfc_unreg_did(phba, 0xffffffff, mb); 386 lpfc_unreg_did(phba, 0xffffffff, mb);
@@ -374,16 +415,6 @@ lpfc_linkdown(struct lpfc_hba * phba)
374 rc = lpfc_disc_state_machine(phba, ndlp, NULL, 415 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
375 NLP_EVT_DEVICE_RECOVERY); 416 NLP_EVT_DEVICE_RECOVERY);
376 417
377 /* Check config parameter use-adisc or FCP-2 */
378 if ((rc != NLP_STE_FREED_NODE) &&
379 (phba->cfg_use_adisc == 0) &&
380 !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
381 /* We know we will have to relogin, so
382 * unreglogin the rpi right now to fail
383 * any outstanding I/Os quickly.
384 */
385 lpfc_unreg_rpi(phba, ndlp);
386 }
387 } 418 }
388 } 419 }
389 420
@@ -427,6 +458,9 @@ lpfc_linkup(struct lpfc_hba * phba)
427 struct list_head *listp, *node_list[7]; 458 struct list_head *listp, *node_list[7];
428 int i; 459 int i;
429 460
461 fc_host_post_event(phba->host, fc_get_event_number(),
462 FCH_EVT_LINKUP, 0);
463
430 spin_lock_irq(phba->host->host_lock); 464 spin_lock_irq(phba->host->host_lock);
431 phba->hba_state = LPFC_LINK_UP; 465 phba->hba_state = LPFC_LINK_UP;
432 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 466 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
@@ -638,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
638 672
639 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, 673 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
640 sizeof (struct serv_parm)); 674 sizeof (struct serv_parm));
675 if (phba->cfg_soft_wwpn)
676 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
641 memcpy((uint8_t *) & phba->fc_nodename, 677 memcpy((uint8_t *) & phba->fc_nodename,
642 (uint8_t *) & phba->fc_sparam.nodeName, 678 (uint8_t *) & phba->fc_sparam.nodeName,
643 sizeof (struct lpfc_name)); 679 sizeof (struct lpfc_name));
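
The soft_wwpn hook above simply overwrites the port name read from the service parameters whenever the user has configured one. u64_to_wwn() unpacks a 64-bit world-wide name into eight bytes, most significant byte first; a self-contained equivalent, with an illustrative (made-up) WWPN value.

#include <stdio.h>
#include <stdint.h>

/* big-endian unpack of a 64-bit world-wide name, MSB first */
static void u64_to_wwn_local(uint64_t inm, uint8_t wwn[8])
{
    int i;

    for (i = 0; i < 8; i++)
        wwn[i] = (uint8_t)(inm >> (8 * (7 - i)));
}

int main(void)
{
    uint8_t wwn[8];
    int i;

    u64_to_wwn_local(0x10000000c9abcdefULL, wwn);   /* made-up WWPN */
    for (i = 0; i < 8; i++)
        printf("%02x%s", wwn[i], i < 7 ? ":" : "\n"); /* 10:00:00:00:c9:ab:cd:ef */
    return 0;
}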
@@ -1098,8 +1134,11 @@ lpfc_unregister_remote_port(struct lpfc_hba * phba,
1098 struct fc_rport *rport = ndlp->rport; 1134 struct fc_rport *rport = ndlp->rport;
1099 struct lpfc_rport_data *rdata = rport->dd_data; 1135 struct lpfc_rport_data *rdata = rport->dd_data;
1100 1136
1101 ndlp->rport = NULL; 1137 if (rport->scsi_target_id == -1) {
1102 rdata->pnode = NULL; 1138 ndlp->rport = NULL;
1139 rdata->pnode = NULL;
1140 }
1141
1103 fc_remote_port_delete(rport); 1142 fc_remote_port_delete(rport);
1104 1143
1105 return; 1144 return;
@@ -1227,17 +1266,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1227 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list); 1266 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1228 phba->fc_unmap_cnt++; 1267 phba->fc_unmap_cnt++;
1229 phba->nport_event_cnt++; 1268 phba->nport_event_cnt++;
1230 /* stop nodev tmo if running */
1231 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1232 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1233 spin_unlock_irq(phba->host->host_lock);
1234 del_timer_sync(&nlp->nlp_tmofunc);
1235 spin_lock_irq(phba->host->host_lock);
1236 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1237 list_del_init(&nlp->nodev_timeout_evt.
1238 evt_listp);
1239
1240 }
1241 nlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1269 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1242 nlp->nlp_type |= NLP_FC_NODE; 1270 nlp->nlp_type |= NLP_FC_NODE;
1243 break; 1271 break;
@@ -1248,17 +1276,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1248 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list); 1276 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1249 phba->fc_map_cnt++; 1277 phba->fc_map_cnt++;
1250 phba->nport_event_cnt++; 1278 phba->nport_event_cnt++;
1251 /* stop nodev tmo if running */
1252 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1253 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1254 spin_unlock_irq(phba->host->host_lock);
1255 del_timer_sync(&nlp->nlp_tmofunc);
1256 spin_lock_irq(phba->host->host_lock);
1257 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1258 list_del_init(&nlp->nodev_timeout_evt.
1259 evt_listp);
1260
1261 }
1262 nlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1279 nlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1263 break; 1280 break;
1264 case NLP_NPR_LIST: 1281 case NLP_NPR_LIST:
@@ -1267,11 +1284,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1267 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list); 1284 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1268 phba->fc_npr_cnt++; 1285 phba->fc_npr_cnt++;
1269 1286
1270 if (!(nlp->nlp_flag & NLP_NODEV_TMO))
1271 mod_timer(&nlp->nlp_tmofunc,
1272 jiffies + HZ * phba->cfg_nodev_tmo);
1273
1274 nlp->nlp_flag |= NLP_NODEV_TMO;
1275 nlp->nlp_flag &= ~NLP_RCV_PLOGI; 1287 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1276 break; 1288 break;
1277 case NLP_JUST_DQ: 1289 case NLP_JUST_DQ:
@@ -1301,7 +1313,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1301 * already. If we have, and it's a scsi entity, be 1313 * already. If we have, and it's a scsi entity, be
1302 * sure to unblock any attached scsi devices 1314 * sure to unblock any attached scsi devices
1303 */ 1315 */
1304 if (!nlp->rport) 1316 if ((!nlp->rport) || (nlp->rport->port_state ==
1317 FC_PORTSTATE_BLOCKED))
1305 lpfc_register_remote_port(phba, nlp); 1318 lpfc_register_remote_port(phba, nlp);
1306 1319
1307 /* 1320 /*
@@ -1575,15 +1588,12 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1575 1588
1576 lpfc_els_abort(phba,ndlp,0); 1589 lpfc_els_abort(phba,ndlp,0);
1577 spin_lock_irq(phba->host->host_lock); 1590 spin_lock_irq(phba->host->host_lock);
1578 ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO); 1591 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1579 spin_unlock_irq(phba->host->host_lock); 1592 spin_unlock_irq(phba->host->host_lock);
1580 del_timer_sync(&ndlp->nlp_tmofunc);
1581 1593
1582 ndlp->nlp_last_elscmd = 0; 1594 ndlp->nlp_last_elscmd = 0;
1583 del_timer_sync(&ndlp->nlp_delayfunc); 1595 del_timer_sync(&ndlp->nlp_delayfunc);
1584 1596
1585 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1586 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1587 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 1597 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1588 list_del_init(&ndlp->els_retry_evt.evt_listp); 1598 list_del_init(&ndlp->els_retry_evt.evt_listp);
1589 1599
@@ -1600,16 +1610,6 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1600int 1610int
1601lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1611lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1602{ 1612{
1603 if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1604 spin_lock_irq(phba->host->host_lock);
1605 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1606 spin_unlock_irq(phba->host->host_lock);
1607 del_timer_sync(&ndlp->nlp_tmofunc);
1608 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1609 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1610
1611 }
1612
1613 1613
1614 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1614 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1615 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1615 lpfc_cancel_retry_delay_tmo(phba, ndlp);
@@ -2424,34 +2424,6 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2424 return; 2424 return;
2425} 2425}
2426 2426
2427static void
2428lpfc_nodev_timeout(unsigned long ptr)
2429{
2430 struct lpfc_hba *phba;
2431 struct lpfc_nodelist *ndlp;
2432 unsigned long iflag;
2433 struct lpfc_work_evt *evtp;
2434
2435 ndlp = (struct lpfc_nodelist *)ptr;
2436 phba = ndlp->nlp_phba;
2437 evtp = &ndlp->nodev_timeout_evt;
2438 spin_lock_irqsave(phba->host->host_lock, iflag);
2439
2440 if (!list_empty(&evtp->evt_listp)) {
2441 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2442 return;
2443 }
2444 evtp->evt_arg1 = ndlp;
2445 evtp->evt = LPFC_EVT_NODEV_TMO;
2446 list_add_tail(&evtp->evt_listp, &phba->work_list);
2447 if (phba->work_wait)
2448 wake_up(phba->work_wait);
2449
2450 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2451 return;
2452}
2453
2454
2455/* 2427/*
2456 * This routine handles processing a NameServer REG_LOGIN mailbox 2428 * This routine handles processing a NameServer REG_LOGIN mailbox
2457 * command upon completion. It is setup in the LPFC_MBOXQ 2429 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -2575,11 +2547,7 @@ lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2575 uint32_t did) 2547 uint32_t did)
2576{ 2548{
2577 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2549 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2578 INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2579 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2550 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2580 init_timer(&ndlp->nlp_tmofunc);
2581 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2582 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2583 init_timer(&ndlp->nlp_delayfunc); 2551 init_timer(&ndlp->nlp_delayfunc);
2584 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2552 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2585 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2553 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f6948ffe689a..4cdf3464267f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
268 kfree(mp); 268 kfree(mp);
269 pmb->context1 = NULL; 269 pmb->context1 = NULL;
270 270
271 if (phba->cfg_soft_wwpn)
272 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
271 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 273 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
272 sizeof (struct lpfc_name)); 274 sizeof (struct lpfc_name));
273 memcpy(&phba->fc_portname, &phba->fc_sparam.portName, 275 memcpy(&phba->fc_portname, &phba->fc_sparam.portName,
@@ -511,6 +513,7 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
511{ 513{
512 struct lpfc_sli *psli = &phba->sli; 514 struct lpfc_sli *psli = &phba->sli;
513 struct lpfc_sli_ring *pring; 515 struct lpfc_sli_ring *pring;
516 uint32_t event_data;
514 517
515 if (phba->work_hs & HS_FFER6) { 518 if (phba->work_hs & HS_FFER6) {
516 /* Re-establishing Link */ 519 /* Re-establishing Link */
@@ -555,6 +558,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
555 phba->brd_no, phba->work_hs, 558 phba->brd_no, phba->work_hs,
556 phba->work_status[0], phba->work_status[1]); 559 phba->work_status[0], phba->work_status[1]);
557 560
561 event_data = FC_REG_DUMP_EVENT;
562 fc_host_post_vendor_event(phba->host, fc_get_event_number(),
563 sizeof(event_data), (char *) &event_data,
564 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
565
558 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 566 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
559 lpfc_offline(phba); 567 lpfc_offline(phba);
560 phba->hba_state = LPFC_HBA_ERROR; 568 phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 20449a8dd53d..d5f415007db2 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1813,7 +1813,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1813 */ 1813 */
1814/* 1814/*
1815 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped 1815 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1816 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers 1816 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
1817 * expire, all effected nodes will receive a DEVICE_RM event. 1817 * expire, all effected nodes will receive a DEVICE_RM event.
1818 */ 1818 */
1819/* 1819/*
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a8816a8738f8..97ae98dc95d0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -935,7 +935,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
935 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ); 935 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
936 spin_lock_irq(phba->host->host_lock); 936 spin_lock_irq(phba->host->host_lock);
937 if (++loop_count 937 if (++loop_count
938 > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT) 938 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
939 break; 939 break;
940 } 940 }
941 941
@@ -978,7 +978,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
978 spin_lock_irq(shost->host_lock); 978 spin_lock_irq(shost->host_lock);
979 /* 979 /*
980 * If target is not in a MAPPED state, delay the reset until 980 * If target is not in a MAPPED state, delay the reset until
981 * target is rediscovered or nodev timeout expires. 981 * target is rediscovered or devloss timeout expires.
982 */ 982 */
983 while ( 1 ) { 983 while ( 1 ) {
984 if (!pnode) 984 if (!pnode)
@@ -1050,7 +1050,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
1050 spin_lock_irq(phba->host->host_lock); 1050 spin_lock_irq(phba->host->host_lock);
1051 1051
1052 if (++loopcnt 1052 if (++loopcnt
1053 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) 1053 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1054 break; 1054 break;
1055 1055
1056 cnt = lpfc_sli_sum_iocb(phba, 1056 cnt = lpfc_sli_sum_iocb(phba,
@@ -1151,7 +1151,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1151 spin_lock_irq(phba->host->host_lock); 1151 spin_lock_irq(phba->host->host_lock);
1152 1152
1153 if (++loopcnt 1153 if (++loopcnt
1154 > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT) 1154 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1155 break; 1155 break;
1156 1156
1157 cnt = lpfc_sli_sum_iocb(phba, 1157 cnt = lpfc_sli_sum_iocb(phba,
@@ -1249,7 +1249,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
1249 * target pointer is stored in the starget_data for the 1249 * target pointer is stored in the starget_data for the
1250 * driver's sysfs entry point functions. 1250 * driver's sysfs entry point functions.
1251 */ 1251 */
1252 rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5; 1252 rport->dev_loss_tmo = phba->cfg_devloss_tmo;
1253 1253
1254 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1254 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1255 lpfc_sli_poll_fcp_ring(phba); 1255 lpfc_sli_poll_fcp_ring(phba);
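
The lpfc_scsi.c error handlers above bound their wait loops by the same timeout the transport now uses: each loop gives up after roughly twice cfg_devloss_tmo seconds' worth of fixed-length sleeps. A tiny sketch of that bounding arithmetic; the constants are illustrative, not the driver's.

#include <stdio.h>

#define RESET_WAIT 2   /* seconds "slept" per loop pass (stand-in) */

int main(void)
{
    int devloss_tmo = 30;
    int loopcnt = 0, pending = 7;

    while (pending > 0) {
        pending--;                                /* fake I/O draining */
        if (++loopcnt > (2 * devloss_tmo) / RESET_WAIT)
            break;                                /* cap: ~2 * devloss_tmo seconds */
    }
    printf("stopped after %d passes, %d I/Os left\n", loopcnt, pending);
    return 0;
}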
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7091ea29f3f..ac417908b407 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.9" 21#define LPFC_DRIVER_VERSION "8.1.10"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 89ef34df5a1d..6422de72bf43 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -431,7 +431,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
431 struct fsc_state *state; 431 struct fsc_state *state;
432 struct Scsi_Host *host; 432 struct Scsi_Host *host;
433 void *dma_cmd_space; 433 void *dma_cmd_space;
434 unsigned char *clkprop; 434 const unsigned char *clkprop;
435 int proplen, rc = -ENODEV; 435 int proplen, rc = -ENODEV;
436 436
437 if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { 437 if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 76edbb639d37..b87bef69ba0f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2822,9 +2822,7 @@ mega_print_inquiry(char *page, char *scsi_inq)
2822 2822
2823 i = scsi_inq[0] & 0x1f; 2823 i = scsi_inq[0] & 0x1f;
2824 2824
2825 len += sprintf(page+len, " Type: %s ", 2825 len += sprintf(page+len, " Type: %s ", scsi_device_type(i));
2826 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
2827 "Unknown ");
2828 2826
2829 len += sprintf(page+len, 2827 len += sprintf(page+len,
2830 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); 2828 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
@@ -3658,8 +3656,9 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3658 * Send the request sense data also, irrespective of 3656 * Send the request sense data also, irrespective of
3659 * whether the user has asked for it or not. 3657 * whether the user has asked for it or not.
3660 */ 3658 */
3661 copy_to_user(upthru->reqsensearea, 3659 if (copy_to_user(upthru->reqsensearea,
3662 pthru->reqsensearea, 14); 3660 pthru->reqsensearea, 14))
3661 rval = -EFAULT;
3663 3662
3664freemem_and_return: 3663freemem_and_return:
3665 if( pthru->dataxferlen ) { 3664 if( pthru->dataxferlen ) {
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index cd982c877da0..266b3910846b 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -330,6 +330,21 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
330 NULL, 330 NULL,
331}; 331};
332 332
333/**
334 * megaraid_change_queue_depth - Change the device's queue depth
335 * @sdev: scsi device struct
336 * @qdepth: depth to set
337 *
338 * Return value:
339 * actual depth set
340 **/
341static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
342{
343 if (qdepth > MBOX_MAX_SCSI_CMDS)
344 qdepth = MBOX_MAX_SCSI_CMDS;
345 scsi_adjust_queue_depth(sdev, 0, qdepth);
346 return sdev->queue_depth;
347}
333 348
334/* 349/*
335 * Scsi host template for megaraid unified driver 350 * Scsi host template for megaraid unified driver
@@ -343,6 +358,7 @@ static struct scsi_host_template megaraid_template_g = {
343 .eh_device_reset_handler = megaraid_reset_handler, 358 .eh_device_reset_handler = megaraid_reset_handler,
344 .eh_bus_reset_handler = megaraid_reset_handler, 359 .eh_bus_reset_handler = megaraid_reset_handler,
345 .eh_host_reset_handler = megaraid_reset_handler, 360 .eh_host_reset_handler = megaraid_reset_handler,
361 .change_queue_depth = megaraid_change_queue_depth,
346 .use_clustering = ENABLE_CLUSTERING, 362 .use_clustering = ENABLE_CLUSTERING,
347 .sdev_attrs = megaraid_sdev_attrs, 363 .sdev_attrs = megaraid_sdev_attrs,
348 .shost_attrs = megaraid_shost_attrs, 364 .shost_attrs = megaraid_shost_attrs,
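
megaraid_change_queue_depth() above is the usual clamp-and-apply pattern: never let the midlayer push the per-device queue depth past what the adapter's mailbox interface can hold. A standalone sketch; the limit value below is a placeholder, not MBOX_MAX_SCSI_CMDS.

#include <stdio.h>

#define MAX_ADAPTER_CMDS 126   /* placeholder for the controller-wide limit */

struct sdev { int queue_depth; };

static int change_queue_depth(struct sdev *sdev, int qdepth)
{
    if (qdepth > MAX_ADAPTER_CMDS)
        qdepth = MAX_ADAPTER_CMDS;   /* clamp to what the HBA can take */
    sdev->queue_depth = qdepth;      /* scsi_adjust_queue_depth() in-kernel */
    return sdev->queue_depth;        /* report the depth actually set */
}

int main(void)
{
    struct sdev d = { 32 };

    printf("%d\n", change_queue_depth(&d, 64));    /* 64  */
    printf("%d\n", change_queue_depth(&d, 4096));  /* 126 */
    return 0;
}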
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index a8c9627a15c4..4cab5b534b25 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -53,31 +53,15 @@ MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
53 */ 53 */
54static struct pci_device_id megasas_pci_table[] = { 54static struct pci_device_id megasas_pci_table[] = {
55 55
56 { 56 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
57 PCI_VENDOR_ID_LSI_LOGIC, 57 /* xscale IOP */
58 PCI_DEVICE_ID_LSI_SAS1064R, /* xscale IOP */ 58 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
59 PCI_ANY_ID, 59 /* ppc IOP */
60 PCI_ANY_ID, 60 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
61 }, 61 /* xscale IOP, vega */
62 { 62 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
63 PCI_VENDOR_ID_LSI_LOGIC, 63 /* xscale IOP */
64 PCI_DEVICE_ID_LSI_SAS1078R, /* ppc IOP */ 64 {}
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {
69 PCI_VENDOR_ID_LSI_LOGIC,
70 PCI_DEVICE_ID_LSI_VERDE_ZCR, /* xscale IOP, vega */
71 PCI_ANY_ID,
72 PCI_ANY_ID,
73 },
74 {
75 PCI_VENDOR_ID_DELL,
76 PCI_DEVICE_ID_DELL_PERC5, /* xscale IOP */
77 PCI_ANY_ID,
78 PCI_ANY_ID,
79 },
80 {0} /* Terminating entry */
81}; 65};
82 66
83MODULE_DEVICE_TABLE(pci, megasas_pci_table); 67MODULE_DEVICE_TABLE(pci, megasas_pci_table);
@@ -2854,7 +2838,7 @@ static int __init megasas_init(void)
2854 /* 2838 /*
2855 * Register ourselves as PCI hotplug module 2839 * Register ourselves as PCI hotplug module
2856 */ 2840 */
2857 rval = pci_module_init(&megasas_pci_driver); 2841 rval = pci_register_driver(&megasas_pci_driver);
2858 2842
2859 if (rval) { 2843 if (rval) {
2860 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); 2844 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n");
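
The table rewrite in megaraid_sas.c replaces four verbose pci_device_id initializers with the PCI_DEVICE() helper, which fills in vendor and device and wildcards the subsystem IDs. A local re-creation of the idea; the struct, macro, and ID values below are illustrative, not the kernel's pci.h.

#include <stdio.h>

#define ANY_ID 0xffffu

struct pci_id {
    unsigned int vendor, device, subvendor, subdevice;
};

/* designated-initializer shorthand, same shape as the kernel's PCI_DEVICE() */
#define PCI_DEVICE_ENTRY(vend, dev) \
    .vendor = (vend), .device = (dev), \
    .subvendor = ANY_ID, .subdevice = ANY_ID

static const struct pci_id table[] = {
    { PCI_DEVICE_ENTRY(0x1000, 0x0411) },   /* example vendor/device pair */
    { PCI_DEVICE_ENTRY(0x1028, 0x0015) },   /* another example */
    { }                                     /* terminating entry */
};

int main(void)
{
    int i;

    for (i = 0; table[i].vendor; i++)
        printf("%04x:%04x (sub %04x:%04x)\n", table[i].vendor,
               table[i].device, table[i].subvendor, table[i].subdevice);
    return 0;
}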
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 5572981a9f92..592b52afe658 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1850,7 +1850,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1850{ 1850{
1851 struct device_node *mesh = macio_get_of_node(mdev); 1851 struct device_node *mesh = macio_get_of_node(mdev);
1852 struct pci_dev* pdev = macio_get_pci_dev(mdev); 1852 struct pci_dev* pdev = macio_get_pci_dev(mdev);
1853 int tgt, *cfp, minper; 1853 int tgt, minper;
1854 const int *cfp;
1854 struct mesh_state *ms; 1855 struct mesh_state *ms;
1855 struct Scsi_Host *mesh_host; 1856 struct Scsi_Host *mesh_host;
1856 void *dma_cmd_space; 1857 void *dma_cmd_space;
@@ -1939,7 +1940,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1939 ms->tgts[tgt].current_req = NULL; 1940 ms->tgts[tgt].current_req = NULL;
1940 } 1941 }
1941 1942
1942 if ((cfp = (int *) get_property(mesh, "clock-frequency", NULL))) 1943 if ((cfp = get_property(mesh, "clock-frequency", NULL)))
1943 ms->clk_freq = *cfp; 1944 ms->clk_freq = *cfp;
1944 else { 1945 else {
1945 printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n"); 1946 printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index cb367c2c5c78..9b991b746d1e 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -29,7 +29,7 @@ static irqreturn_t mvme147_intr (int irq, void *dummy, struct pt_regs *fp)
29 return IRQ_HANDLED; 29 return IRQ_HANDLED;
30} 30}
31 31
32static int dma_setup (Scsi_Cmnd *cmd, int dir_in) 32static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
33{ 33{
34 unsigned char flags = 0x01; 34 unsigned char flags = 0x01;
35 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 35 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
@@ -57,7 +57,7 @@ static int dma_setup (Scsi_Cmnd *cmd, int dir_in)
57 return 0; 57 return 0;
58} 58}
59 59
60static void dma_stop (struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 60static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
61 int status) 61 int status)
62{ 62{
63 m147_pcc->dma_cntrl = 0; 63 m147_pcc->dma_cntrl = 0;
@@ -112,7 +112,7 @@ int mvme147_detect(struct scsi_host_template *tpnt)
112 return 0; 112 return 0;
113} 113}
114 114
115static int mvme147_bus_reset(Scsi_Cmnd *cmd) 115static int mvme147_bus_reset(struct scsi_cmnd *cmd)
116{ 116{
117 /* FIXME perform bus-specific reset */ 117 /* FIXME perform bus-specific reset */
118 118
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index 2f56d69bd180..32aee85434d8 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -12,10 +12,6 @@
12 12
13int mvme147_detect(struct scsi_host_template *); 13int mvme147_detect(struct scsi_host_template *);
14int mvme147_release(struct Scsi_Host *); 14int mvme147_release(struct Scsi_Host *);
15const char *wd33c93_info(void);
16int wd33c93_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
17int wd33c93_abort(Scsi_Cmnd *);
18int wd33c93_reset(Scsi_Cmnd *, unsigned int);
19 15
20#ifndef CMD_PER_LUN 16#ifndef CMD_PER_LUN
21#define CMD_PER_LUN 2 17#define CMD_PER_LUN 2
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b332caddd5b3..c51b5769eac8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -96,24 +96,40 @@ unsigned int scsi_logging_level;
96EXPORT_SYMBOL(scsi_logging_level); 96EXPORT_SYMBOL(scsi_logging_level);
97#endif 97#endif
98 98
99const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { 99static const char *const scsi_device_types[] = {
100 "Direct-Access ", 100 "Direct access ",
101 "Sequential-Access", 101 "Sequential access",
102 "Printer ", 102 "Printer ",
103 "Processor ", 103 "Processor ",
104 "WORM ", 104 "WORM ",
105 "CD-ROM ", 105 "CD/DVD ",
106 "Scanner ", 106 "Scanner ",
107 "Optical Device ", 107 "Optical memory ",
108 "Medium Changer ", 108 "Media changer ",
109 "Communications ", 109 "Communications ",
110 "Unknown ", 110 "ASC IT8 ",
111 "Unknown ", 111 "ASC IT8 ",
112 "RAID ", 112 "RAID ",
113 "Enclosure ", 113 "Enclosure ",
114 "Direct-Access-RBC", 114 "Direct access RBC",
115 "Optical card ",
116 "Bridge controller",
117 "Object storage ",
118 "Automation/Drive ",
115}; 119};
116EXPORT_SYMBOL(scsi_device_types); 120
121const char * scsi_device_type(unsigned type)
122{
123 if (type == 0x1e)
124 return "Well-known LUN ";
125 if (type == 0x1f)
126 return "No Device ";
 127 if (type >= ARRAY_SIZE(scsi_device_types))
128 return "Unknown ";
129 return scsi_device_types[type];
130}
131
132EXPORT_SYMBOL(scsi_device_type);
117 133
118struct scsi_host_cmd_pool { 134struct scsi_host_cmd_pool {
119 kmem_cache_t *slab; 135 kmem_cache_t *slab;
@@ -835,14 +851,14 @@ EXPORT_SYMBOL(scsi_track_queue_full);
835 */ 851 */
836int scsi_device_get(struct scsi_device *sdev) 852int scsi_device_get(struct scsi_device *sdev)
837{ 853{
838 if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) 854 if (sdev->sdev_state == SDEV_DEL)
839 return -ENXIO; 855 return -ENXIO;
840 if (!get_device(&sdev->sdev_gendev)) 856 if (!get_device(&sdev->sdev_gendev))
841 return -ENXIO; 857 return -ENXIO;
842 if (!try_module_get(sdev->host->hostt->module)) { 858 /* We can fail this if we're doing SCSI operations
843 put_device(&sdev->sdev_gendev); 859 * from module exit (like cache flush) */
844 return -ENXIO; 860 try_module_get(sdev->host->hostt->module);
845 } 861
846 return 0; 862 return 0;
847} 863}
848EXPORT_SYMBOL(scsi_device_get); 864EXPORT_SYMBOL(scsi_device_get);
@@ -857,7 +873,14 @@ EXPORT_SYMBOL(scsi_device_get);
857 */ 873 */
858void scsi_device_put(struct scsi_device *sdev) 874void scsi_device_put(struct scsi_device *sdev)
859{ 875{
860 module_put(sdev->host->hostt->module); 876 struct module *module = sdev->host->hostt->module;
877
878#ifdef CONFIG_MODULE_UNLOAD
879 /* The module refcount will be zero if scsi_device_get()
880 * was called from a module removal routine */
881 if (module && module_refcount(module) != 0)
882 module_put(module);
883#endif
861 put_device(&sdev->sdev_gendev); 884 put_device(&sdev->sdev_gendev);
862} 885}
863EXPORT_SYMBOL(scsi_device_put); 886EXPORT_SYMBOL(scsi_device_put);
@@ -1099,6 +1122,8 @@ static int __init init_scsi(void)
1099 for_each_possible_cpu(i) 1122 for_each_possible_cpu(i)
1100 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); 1123 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
1101 1124
1125 scsi_netlink_init();
1126
1102 printk(KERN_NOTICE "SCSI subsystem initialized\n"); 1127 printk(KERN_NOTICE "SCSI subsystem initialized\n");
1103 return 0; 1128 return 0;
1104 1129
@@ -1119,6 +1144,7 @@ cleanup_queue:
1119 1144
1120static void __exit exit_scsi(void) 1145static void __exit exit_scsi(void)
1121{ 1146{
1147 scsi_netlink_exit();
1122 scsi_sysfs_unregister(); 1148 scsi_sysfs_unregister();
1123 scsi_exit_sysctl(); 1149 scsi_exit_sysctl();
1124 scsi_exit_hosts(); 1150 scsi_exit_hosts();
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index f51e466893e7..d5a55fae60e0 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -20,8 +20,6 @@
20#ifndef _SCSI_H 20#ifndef _SCSI_H
21#define _SCSI_H 21#define _SCSI_H
22 22
23#include <linux/config.h> /* for CONFIG_SCSI_LOGGING */
24
25#include <scsi/scsi_cmnd.h> 23#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_device.h> 24#include <scsi/scsi_device.h>
27#include <scsi/scsi_eh.h> 25#include <scsi/scsi_eh.h>
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a80303c6b3fd..9c0f35820e3e 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * linux/kernel/scsi_debug.c
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale 3 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking 4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
@@ -8,7 +7,9 @@
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * 8 *
10 * This version is more generic, simulating a variable number of disk 9 * This version is more generic, simulating a variable number of disk
11 * (or disk like devices) sharing a common amount of RAM 10 * (or disk like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
12 * 13 *
13 * 14 *
14 * For documentation see http://www.torque.net/sg/sdebug26.html 15 * For documentation see http://www.torque.net/sg/sdebug26.html
@@ -50,8 +51,8 @@
50#include "scsi_logging.h" 51#include "scsi_logging.h"
51#include "scsi_debug.h" 52#include "scsi_debug.h"
52 53
53#define SCSI_DEBUG_VERSION "1.79" 54#define SCSI_DEBUG_VERSION "1.80"
54static const char * scsi_debug_version_date = "20060604"; 55static const char * scsi_debug_version_date = "20060914";
55 56
56/* Additional Sense Code (ASC) used */ 57/* Additional Sense Code (ASC) used */
57#define NO_ADDITIONAL_SENSE 0x0 58#define NO_ADDITIONAL_SENSE 0x0
@@ -86,6 +87,8 @@ static const char * scsi_debug_version_date = "20060604";
86#define DEF_D_SENSE 0 87#define DEF_D_SENSE 0
87#define DEF_NO_LUN_0 0 88#define DEF_NO_LUN_0 0
88#define DEF_VIRTUAL_GB 0 89#define DEF_VIRTUAL_GB 0
90#define DEF_FAKE_RW 0
91#define DEF_VPD_USE_HOSTNO 1
89 92
90/* bit mask values for scsi_debug_opts */ 93/* bit mask values for scsi_debug_opts */
91#define SCSI_DEBUG_OPT_NOISE 1 94#define SCSI_DEBUG_OPT_NOISE 1
@@ -127,6 +130,8 @@ static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
127static int scsi_debug_dsense = DEF_D_SENSE; 130static int scsi_debug_dsense = DEF_D_SENSE;
128static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; 131static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
129static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; 132static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
133static int scsi_debug_fake_rw = DEF_FAKE_RW;
134static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
130 135
131static int scsi_debug_cmnd_count = 0; 136static int scsi_debug_cmnd_count = 0;
132 137
@@ -423,6 +428,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
423 case READ_6: 428 case READ_6:
424 if ((errsts = check_readiness(SCpnt, 0, devip))) 429 if ((errsts = check_readiness(SCpnt, 0, devip)))
425 break; 430 break;
431 if (scsi_debug_fake_rw)
432 break;
426 if ((*cmd) == READ_16) { 433 if ((*cmd) == READ_16) {
427 for (lba = 0, j = 0; j < 8; ++j) { 434 for (lba = 0, j = 0; j < 8; ++j) {
428 if (j > 0) 435 if (j > 0)
@@ -465,6 +472,8 @@ int scsi_debug_queuecommand(struct scsi_cmnd * SCpnt, done_funct_t done)
465 case WRITE_6: 472 case WRITE_6:
466 if ((errsts = check_readiness(SCpnt, 0, devip))) 473 if ((errsts = check_readiness(SCpnt, 0, devip)))
467 break; 474 break;
475 if (scsi_debug_fake_rw)
476 break;
468 if ((*cmd) == WRITE_16) { 477 if ((*cmd) == WRITE_16) {
469 for (lba = 0, j = 0; j < 8; ++j) { 478 for (lba = 0, j = 0; j < 8; ++j) {
470 if (j > 0) 479 if (j > 0)
@@ -941,6 +950,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
941 char lu_id_str[6]; 950 char lu_id_str[6];
942 int host_no = devip->sdbg_host->shost->host_no; 951 int host_no = devip->sdbg_host->shost->host_no;
943 952
953 if (0 == scsi_debug_vpd_use_hostno)
954 host_no = 0;
944 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) + 955 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
945 (devip->target * 1000) + devip->lun); 956 (devip->target * 1000) + devip->lun);
946 target_dev_id = ((host_no + 1) * 2000) + 957 target_dev_id = ((host_no + 1) * 2000) +
@@ -1059,19 +1070,6 @@ static int resp_requests(struct scsi_cmnd * scp,
1059 arr[12] = THRESHOLD_EXCEEDED; 1070 arr[12] = THRESHOLD_EXCEEDED;
1060 arr[13] = 0xff; /* TEST set and MRIE==6 */ 1071 arr[13] = 0xff; /* TEST set and MRIE==6 */
1061 } 1072 }
1062 } else if (devip->stopped) {
1063 if (want_dsense) {
1064 arr[0] = 0x72;
1065 arr[1] = 0x0; /* NO_SENSE in sense_key */
1066 arr[2] = LOW_POWER_COND_ON;
1067 arr[3] = 0x0; /* TEST set and MRIE==6 */
1068 } else {
1069 arr[0] = 0x70;
1070 arr[2] = 0x0; /* NO_SENSE in sense_key */
1071 arr[7] = 0xa; /* 18 byte sense buffer */
1072 arr[12] = LOW_POWER_COND_ON;
1073 arr[13] = 0x0; /* TEST set and MRIE==6 */
1074 }
1075 } else { 1073 } else {
1076 memcpy(arr, sbuff, SDEBUG_SENSE_LEN); 1074 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
1077 if ((cmd[1] & 1) && (! scsi_debug_dsense)) { 1075 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
@@ -1325,21 +1323,26 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1325static int resp_mode_sense(struct scsi_cmnd * scp, int target, 1323static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1326 struct sdebug_dev_info * devip) 1324 struct sdebug_dev_info * devip)
1327{ 1325{
1328 unsigned char dbd; 1326 unsigned char dbd, llbaa;
1329 int pcontrol, pcode, subpcode; 1327 int pcontrol, pcode, subpcode, bd_len;
1330 unsigned char dev_spec; 1328 unsigned char dev_spec;
1331 int alloc_len, msense_6, offset, len, errsts, target_dev_id; 1329 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1332 unsigned char * ap; 1330 unsigned char * ap;
1333 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1331 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1334 unsigned char *cmd = (unsigned char *)scp->cmnd; 1332 unsigned char *cmd = (unsigned char *)scp->cmnd;
1335 1333
1336 if ((errsts = check_readiness(scp, 1, devip))) 1334 if ((errsts = check_readiness(scp, 1, devip)))
1337 return errsts; 1335 return errsts;
1338 dbd = cmd[1] & 0x8; 1336 dbd = !!(cmd[1] & 0x8);
1339 pcontrol = (cmd[2] & 0xc0) >> 6; 1337 pcontrol = (cmd[2] & 0xc0) >> 6;
1340 pcode = cmd[2] & 0x3f; 1338 pcode = cmd[2] & 0x3f;
1341 subpcode = cmd[3]; 1339 subpcode = cmd[3];
1342 msense_6 = (MODE_SENSE == cmd[0]); 1340 msense_6 = (MODE_SENSE == cmd[0]);
1341 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1342 if ((0 == scsi_debug_ptype) && (0 == dbd))
1343 bd_len = llbaa ? 16 : 8;
1344 else
1345 bd_len = 0;
1343 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); 1346 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1344 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); 1347 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1345 if (0x3 == pcontrol) { /* Saving values not supported */ 1348 if (0x3 == pcontrol) { /* Saving values not supported */
@@ -1349,15 +1352,58 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1349 } 1352 }
1350 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + 1353 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1351 (devip->target * 1000) - 3; 1354 (devip->target * 1000) - 3;
1352 dev_spec = DEV_READONLY(target) ? 0x80 : 0x0; 1355 /* set DPOFUA bit for disks */
1356 if (0 == scsi_debug_ptype)
1357 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1358 else
1359 dev_spec = 0x0;
1353 if (msense_6) { 1360 if (msense_6) {
1354 arr[2] = dev_spec; 1361 arr[2] = dev_spec;
1362 arr[3] = bd_len;
1355 offset = 4; 1363 offset = 4;
1356 } else { 1364 } else {
1357 arr[3] = dev_spec; 1365 arr[3] = dev_spec;
1366 if (16 == bd_len)
1367 arr[4] = 0x1; /* set LONGLBA bit */
1368 arr[7] = bd_len; /* assume 255 or less */
1358 offset = 8; 1369 offset = 8;
1359 } 1370 }
1360 ap = arr + offset; 1371 ap = arr + offset;
1372 if ((bd_len > 0) && (0 == sdebug_capacity)) {
1373 if (scsi_debug_virtual_gb > 0) {
1374 sdebug_capacity = 2048 * 1024;
1375 sdebug_capacity *= scsi_debug_virtual_gb;
1376 } else
1377 sdebug_capacity = sdebug_store_sectors;
1378 }
1379 if (8 == bd_len) {
1380 if (sdebug_capacity > 0xfffffffe) {
1381 ap[0] = 0xff;
1382 ap[1] = 0xff;
1383 ap[2] = 0xff;
1384 ap[3] = 0xff;
1385 } else {
1386 ap[0] = (sdebug_capacity >> 24) & 0xff;
1387 ap[1] = (sdebug_capacity >> 16) & 0xff;
1388 ap[2] = (sdebug_capacity >> 8) & 0xff;
1389 ap[3] = sdebug_capacity & 0xff;
1390 }
1391 ap[6] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1392 ap[7] = SECT_SIZE_PER(target) & 0xff;
1393 offset += bd_len;
1394 ap = arr + offset;
1395 } else if (16 == bd_len) {
1396 unsigned long long capac = sdebug_capacity;
1397
1398 for (k = 0; k < 8; ++k, capac >>= 8)
1399 ap[7 - k] = capac & 0xff;
1400 ap[12] = (SECT_SIZE_PER(target) >> 24) & 0xff;
1401 ap[13] = (SECT_SIZE_PER(target) >> 16) & 0xff;
1402 ap[14] = (SECT_SIZE_PER(target) >> 8) & 0xff;
1403 ap[15] = SECT_SIZE_PER(target) & 0xff;
1404 offset += bd_len;
1405 ap = arr + offset;
1406 }
1361 1407
1362 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { 1408 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1363 /* TODO: Control Extension page */ 1409 /* TODO: Control Extension page */
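(Editor's illustration, not part of the patch: the hunk above builds MODE SENSE block descriptors. A small standalone sketch of how the 8-byte "short LBA" descriptor packs capacity and block length the same way the driver does; the capacity and sector size fed to it are hypothetical.)

	#include <stdio.h>

	static void fill_short_block_descriptor(unsigned char *bd,
						unsigned long long capacity,
						unsigned int sect_size)
	{
		if (capacity > 0xfffffffe) {
			/* capacity does not fit in 4 bytes: report all ones */
			bd[0] = bd[1] = bd[2] = bd[3] = 0xff;
		} else {
			bd[0] = (capacity >> 24) & 0xff;
			bd[1] = (capacity >> 16) & 0xff;
			bd[2] = (capacity >> 8) & 0xff;
			bd[3] = capacity & 0xff;
		}
		bd[4] = bd[5] = 0;
		bd[6] = (sect_size >> 8) & 0xff;	/* bytes 6-7 carry the block */
		bd[7] = sect_size & 0xff;		/* length, as in the driver */
	}

	int main(void)
	{
		unsigned char bd[8];

		fill_short_block_descriptor(bd, 2048 * 1024ULL, 512);
		printf("%02x %02x %02x %02x .. %02x %02x\n",
		       bd[0], bd[1], bd[2], bd[3], bd[6], bd[7]);
		return 0;
	}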
@@ -1471,7 +1517,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1471 " IO sent=%d bytes\n", param_len, res); 1517 " IO sent=%d bytes\n", param_len, res);
1472 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); 1518 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1473 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); 1519 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1474 if ((md_len > 2) || (0 != bd_len)) { 1520 if (md_len > 2) {
1475 mk_sense_buffer(devip, ILLEGAL_REQUEST, 1521 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1476 INVALID_FIELD_IN_PARAM_LIST, 0); 1522 INVALID_FIELD_IN_PARAM_LIST, 0);
1477 return check_condition_result; 1523 return check_condition_result;
@@ -1544,7 +1590,7 @@ static int resp_ie_l_pg(unsigned char * arr)
1544static int resp_log_sense(struct scsi_cmnd * scp, 1590static int resp_log_sense(struct scsi_cmnd * scp,
1545 struct sdebug_dev_info * devip) 1591 struct sdebug_dev_info * devip)
1546{ 1592{
1547 int ppc, sp, pcontrol, pcode, alloc_len, errsts, len, n; 1593 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1548 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 1594 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1549 unsigned char *cmd = (unsigned char *)scp->cmnd; 1595 unsigned char *cmd = (unsigned char *)scp->cmnd;
1550 1596
@@ -1560,23 +1606,63 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1560 } 1606 }
1561 pcontrol = (cmd[2] & 0xc0) >> 6; 1607 pcontrol = (cmd[2] & 0xc0) >> 6;
1562 pcode = cmd[2] & 0x3f; 1608 pcode = cmd[2] & 0x3f;
1609 subpcode = cmd[3] & 0xff;
1563 alloc_len = (cmd[7] << 8) + cmd[8]; 1610 alloc_len = (cmd[7] << 8) + cmd[8];
1564 arr[0] = pcode; 1611 arr[0] = pcode;
1565 switch (pcode) { 1612 if (0 == subpcode) {
1566 case 0x0: /* Supported log pages log page */ 1613 switch (pcode) {
1567 n = 4; 1614 case 0x0: /* Supported log pages log page */
1568 arr[n++] = 0x0; /* this page */ 1615 n = 4;
1569 arr[n++] = 0xd; /* Temperature */ 1616 arr[n++] = 0x0; /* this page */
1570 arr[n++] = 0x2f; /* Informational exceptions */ 1617 arr[n++] = 0xd; /* Temperature */
1571 arr[3] = n - 4; 1618 arr[n++] = 0x2f; /* Informational exceptions */
1572 break; 1619 arr[3] = n - 4;
1573 case 0xd: /* Temperature log page */ 1620 break;
1574 arr[3] = resp_temp_l_pg(arr + 4); 1621 case 0xd: /* Temperature log page */
1575 break; 1622 arr[3] = resp_temp_l_pg(arr + 4);
1576 case 0x2f: /* Informational exceptions log page */ 1623 break;
1577 arr[3] = resp_ie_l_pg(arr + 4); 1624 case 0x2f: /* Informational exceptions log page */
1578 break; 1625 arr[3] = resp_ie_l_pg(arr + 4);
1579 default: 1626 break;
1627 default:
1628 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1629 INVALID_FIELD_IN_CDB, 0);
1630 return check_condition_result;
1631 }
1632 } else if (0xff == subpcode) {
1633 arr[0] |= 0x40;
1634 arr[1] = subpcode;
1635 switch (pcode) {
1636 case 0x0: /* Supported log pages and subpages log page */
1637 n = 4;
1638 arr[n++] = 0x0;
1639 arr[n++] = 0x0; /* 0,0 page */
1640 arr[n++] = 0x0;
1641 arr[n++] = 0xff; /* this page */
1642 arr[n++] = 0xd;
1643 arr[n++] = 0x0; /* Temperature */
1644 arr[n++] = 0x2f;
1645 arr[n++] = 0x0; /* Informational exceptions */
1646 arr[3] = n - 4;
1647 break;
1648 case 0xd: /* Temperature subpages */
1649 n = 4;
1650 arr[n++] = 0xd;
1651 arr[n++] = 0x0; /* Temperature */
1652 arr[3] = n - 4;
1653 break;
1654 case 0x2f: /* Informational exceptions subpages */
1655 n = 4;
1656 arr[n++] = 0x2f;
1657 arr[n++] = 0x0; /* Informational exceptions */
1658 arr[3] = n - 4;
1659 break;
1660 default:
1661 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1662 INVALID_FIELD_IN_CDB, 0);
1663 return check_condition_result;
1664 }
1665 } else {
1580 mk_sense_buffer(devip, ILLEGAL_REQUEST, 1666 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1581 INVALID_FIELD_IN_CDB, 0); 1667 INVALID_FIELD_IN_CDB, 0);
1582 return check_condition_result; 1668 return check_condition_result;
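(Editor's illustration, not part of the patch: the new subpage handling above keys off three LOG SENSE CDB fields. A tiny standalone sketch of the same decode, using a hypothetical CDB asking for the Temperature page, all subpages.)

	#include <stdio.h>

	int main(void)
	{
		/* LOG SENSE (0x4d): pcontrol=1, page 0x0d, subpage 0xff */
		unsigned char cmd[10] = { 0x4d, 0, 0x4d, 0xff, 0, 0, 0, 0x00, 0x44, 0 };
		int pcontrol = (cmd[2] & 0xc0) >> 6;
		int pcode = cmd[2] & 0x3f;
		int subpcode = cmd[3] & 0xff;
		int alloc_len = (cmd[7] << 8) + cmd[8];

		printf("pcontrol=%d pcode=0x%x subpcode=0x%x alloc_len=%d\n",
		       pcontrol, pcode, subpcode, alloc_len);
		return 0;
	}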
@@ -2151,11 +2237,18 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
2151 } 2237 }
2152} 2238}
2153 2239
2240/* Note: The following macros create attribute files in the
2241 /sys/module/scsi_debug/parameters directory. Unfortunately this
2242 driver is unaware of a change and cannot trigger auxiliary actions
2243 as it can when the corresponding attribute in the
2244 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2245 */
2154module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); 2246module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2155module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); 2247module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2156module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); 2248module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2157module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); 2249module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2158module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); 2250module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2251module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2159module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); 2252module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2160module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); 2253module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2161module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); 2254module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
@@ -2164,6 +2257,8 @@ module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2164module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); 2257module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2165module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); 2258module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2166module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); 2259module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2260module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2261 S_IRUGO | S_IWUSR);
2167 2262
2168MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); 2263MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2169MODULE_DESCRIPTION("SCSI debug adapter driver"); 2264MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2175,6 +2270,7 @@ MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2175MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); 2270MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2176MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); 2271MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2177MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)"); 2272MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)");
2273MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2178MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 2274MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2179MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); 2275MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2180MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); 2276MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
@@ -2183,6 +2279,7 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_error, 4->... (def=0)");
2183MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 2279MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2184MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2280MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2185MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); 2281MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2282MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2186 2283
2187 2284
2188static char sdebug_info[256]; 2285static char sdebug_info[256];
@@ -2334,6 +2431,24 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2334DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, 2431DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2335 sdebug_dsense_store); 2432 sdebug_dsense_store);
2336 2433
2434static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2435{
2436 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2437}
2438static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2439 const char * buf, size_t count)
2440{
2441 int n;
2442
2443 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2444 scsi_debug_fake_rw = n;
2445 return count;
2446 }
2447 return -EINVAL;
2448}
2449DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2450 sdebug_fake_rw_store);
2451
2337static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf) 2452static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2338{ 2453{
2339 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0); 2454 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
@@ -2487,6 +2602,31 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,
2487DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, 2602DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
2488 sdebug_add_host_store); 2603 sdebug_add_host_store);
2489 2604
2605static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
2606 char * buf)
2607{
2608 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
2609}
2610static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
2611 const char * buf, size_t count)
2612{
2613 int n;
2614
2615 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2616 scsi_debug_vpd_use_hostno = n;
2617 return count;
2618 }
2619 return -EINVAL;
2620}
2621DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
2622 sdebug_vpd_use_hostno_store);
2623
2624/* Note: The following function creates attribute files in the
2625 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
2626 files (over those found in the /sys/module/scsi_debug/parameters
2627 directory) is that auxiliary actions can be triggered when an attribute
2628 is changed. For example see: sdebug_add_host_store() above.
2629 */
2490static int do_create_driverfs_files(void) 2630static int do_create_driverfs_files(void)
2491{ 2631{
2492 int ret; 2632 int ret;
@@ -2496,23 +2636,31 @@ static int do_create_driverfs_files(void)
2496 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2636 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
2497 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2637 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2498 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2638 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2639 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
2499 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2640 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2500 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2641 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
2501 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 2642 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2643 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2502 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2644 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2503 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 2645 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
2504 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2646 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2647 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2648 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2505 return ret; 2649 return ret;
2506} 2650}
2507 2651
2508static void do_remove_driverfs_files(void) 2652static void do_remove_driverfs_files(void)
2509{ 2653{
2654 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
2655 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
2510 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 2656 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
2511 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); 2657 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
2512 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); 2658 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
2513 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2514 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 2659 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
2660 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
2661 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
2515 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); 2662 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
2663 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
2516 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); 2664 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
2517 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense); 2665 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
2518 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); 2666 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
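(Editor's illustration, not part of the patch: the comments above contrast module parameter files with driver attribute files. A minimal sketch of the driver-attribute pattern used here, where the store routine can run an auxiliary action when the value changes; the attribute name and do_auxiliary_action() are hypothetical.)

	static int sdebug_example;

	static void do_auxiliary_action(int n)
	{
		/* e.g. resize the fake store or re-announce hosts */
	}

	static ssize_t sdebug_example_show(struct device_driver *ddp, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_example);
	}

	static ssize_t sdebug_example_store(struct device_driver *ddp,
					    const char *buf, size_t count)
	{
		int n;

		if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
			if (n != sdebug_example)
				do_auxiliary_action(n);	/* not possible for a bare module param */
			sdebug_example = n;
			return count;
		}
		return -EINVAL;
	}
	DRIVER_ATTR(example, S_IRUGO | S_IWUSR, sdebug_example_show,
		    sdebug_example_store);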
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 077c1c691210..d6743b959a72 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -551,7 +551,15 @@ static void scsi_run_queue(struct request_queue *q)
551 list_del_init(&sdev->starved_entry); 551 list_del_init(&sdev->starved_entry);
552 spin_unlock_irqrestore(shost->host_lock, flags); 552 spin_unlock_irqrestore(shost->host_lock, flags);
553 553
554 blk_run_queue(sdev->request_queue); 554
555 if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
556 !test_and_set_bit(QUEUE_FLAG_REENTER,
557 &sdev->request_queue->queue_flags)) {
558 blk_run_queue(sdev->request_queue);
559 clear_bit(QUEUE_FLAG_REENTER,
560 &sdev->request_queue->queue_flags);
561 } else
562 blk_run_queue(sdev->request_queue);
555 563
556 spin_lock_irqsave(shost->host_lock, flags); 564 spin_lock_irqsave(shost->host_lock, flags);
557 if (unlikely(!list_empty(&sdev->starved_entry))) 565 if (unlikely(!list_empty(&sdev->starved_entry)))
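(Editor's illustration, not part of the patch: the change above guards blk_run_queue() with test_and_set_bit() so only one path recurses into a starved device's queue at a time. A much-simplified, non-atomic userspace sketch of that idiom; the real code is atomic and also checks the parent queue's flag.)

	#include <stdio.h>

	#define FLAG_REENTER	0

	/* simplified stand-ins for the kernel bitops */
	static int test_and_set_flag(int nr, unsigned long *addr)
	{
		int old = (*addr >> nr) & 1;

		*addr |= 1UL << nr;
		return old;
	}

	static void clear_flag(int nr, unsigned long *addr)
	{
		*addr &= ~(1UL << nr);
	}

	static unsigned long queue_flags;

	static void run_queue(const char *caller)
	{
		if (!test_and_set_flag(FLAG_REENTER, &queue_flags)) {
			printf("%s: runs the queue under the re-entry guard\n", caller);
			clear_flag(FLAG_REENTER, &queue_flags);
		} else {
			printf("%s: guard already held, takes the plain path\n", caller);
		}
	}

	int main(void)
	{
		run_queue("first caller");
		/* a nested caller would find the bit already set */
		queue_flags = 1UL << FLAG_REENTER;
		run_queue("re-entrant caller");
		return 0;
	}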
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
new file mode 100644
index 000000000000..1b59b27e887f
--- /dev/null
+++ b/drivers/scsi/scsi_netlink.c
@@ -0,0 +1,199 @@
1/*
2 * scsi_netlink.c - SCSI Transport Netlink Interface
3 *
4 * Copyright (C) 2006 James Smart, Emulex Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 */
21#include <linux/time.h>
22#include <linux/jiffies.h>
23#include <linux/security.h>
24#include <net/sock.h>
25#include <net/netlink.h>
26
27#include <scsi/scsi_netlink.h>
28#include "scsi_priv.h"
29
30struct sock *scsi_nl_sock = NULL;
31EXPORT_SYMBOL_GPL(scsi_nl_sock);
32
33
34/**
35 * scsi_nl_rcv_msg -
36 * Receive message handler. Extracts message from a receive buffer.
37 * Validates message header and calls appropriate transport message handler
38 *
39 * @skb: socket receive buffer
40 *
41 **/
42static void
43scsi_nl_rcv_msg(struct sk_buff *skb)
44{
45 struct nlmsghdr *nlh;
46 struct scsi_nl_hdr *hdr;
47 uint32_t rlen;
48 int err;
49
50 while (skb->len >= NLMSG_SPACE(0)) {
51 err = 0;
52
53 nlh = (struct nlmsghdr *) skb->data;
54 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
55 (skb->len < nlh->nlmsg_len)) {
56 printk(KERN_WARNING "%s: discarding partial skb\n",
57 __FUNCTION__);
58 return;
59 }
60
61 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
62 if (rlen > skb->len)
63 rlen = skb->len;
64
65 if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) {
66 err = -EBADMSG;
67 goto next_msg;
68 }
69
70 hdr = NLMSG_DATA(nlh);
71 if ((hdr->version != SCSI_NL_VERSION) ||
72 (hdr->magic != SCSI_NL_MAGIC)) {
73 err = -EPROTOTYPE;
74 goto next_msg;
75 }
76
77 if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
78 err = -EPERM;
79 goto next_msg;
80 }
81
82 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
83 printk(KERN_WARNING "%s: discarding partial message\n",
84 __FUNCTION__);
85 return;
86 }
87
88 /*
89 * We currently don't support anyone sending us a message
90 */
91
92next_msg:
93 if ((err) || (nlh->nlmsg_flags & NLM_F_ACK))
94 netlink_ack(skb, nlh, err);
95
96 skb_pull(skb, rlen);
97 }
98}
99
100
101/**
102 * scsi_nl_rcv -
103 * Receive handler for a socket. Extracts a received message buffer from
104 * the socket, and starts message processing.
105 *
106 * @sk: socket
107 * @len: unused
108 *
109 **/
110static void
111scsi_nl_rcv(struct sock *sk, int len)
112{
113 struct sk_buff *skb;
114
115 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
116 scsi_nl_rcv_msg(skb);
117 kfree_skb(skb);
118 }
119}
120
121
122/**
123 * scsi_nl_rcv_event -
124 * Event handler for a netlink socket.
125 *
126 * @this: event notifier block
127 * @event: event type
128 * @ptr: event payload
129 *
130 **/
131static int
132scsi_nl_rcv_event(struct notifier_block *this, unsigned long event, void *ptr)
133{
134 struct netlink_notify *n = ptr;
135
136 if (n->protocol != NETLINK_SCSITRANSPORT)
137 return NOTIFY_DONE;
138
139 /*
140 * Currently, we are not tracking PID's, etc. There is nothing
141 * to handle.
142 */
143
144 return NOTIFY_DONE;
145}
146
147static struct notifier_block scsi_netlink_notifier = {
148 .notifier_call = scsi_nl_rcv_event,
149};
150
151
152/**
153 * scsi_netlink_init -
154 * Called by SCSI subsystem to initialize the SCSI transport netlink
155 * interface
156 *
157 **/
158void
159scsi_netlink_init(void)
160{
161 int error;
162
163 error = netlink_register_notifier(&scsi_netlink_notifier);
164 if (error) {
165 printk(KERN_ERR "%s: register of event handler failed - %d\n",
166 __FUNCTION__, error);
167 return;
168 }
169
170 scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT,
171 SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE);
172 if (!scsi_nl_sock) {
173 printk(KERN_ERR "%s: register of receive handler failed\n",
174 __FUNCTION__);
175 netlink_unregister_notifier(&scsi_netlink_notifier);
176 }
177
178 return;
179}
180
181
182/**
183 * scsi_netlink_exit -
184 * Called by SCSI subsystem to disable the SCSI transport netlink
185 * interface
186 *
187 **/
188void
189scsi_netlink_exit(void)
190{
191 if (scsi_nl_sock) {
192 sock_release(scsi_nl_sock->sk_socket);
193 netlink_unregister_notifier(&scsi_netlink_notifier);
194 }
195
196 return;
197}
198
199
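(Editor's illustration, not part of the new file: a userspace listener could subscribe to the messages this interface multicasts. The sketch below assumes the NETLINK_SCSITRANSPORT and SCSI_NL_GRP_FC_EVENTS definitions introduced by this series are visible to the userspace build; the include paths are assumptions, adjust them for your tree.)

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>	/* assumed to provide NETLINK_SCSITRANSPORT */
	#include <scsi/scsi_netlink.h>	/* assumed to provide SCSI_NL_GRP_FC_EVENTS */

	int main(void)
	{
		struct sockaddr_nl addr;
		char buf[4096];
		int fd, len;

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SCSITRANSPORT);
		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&addr, 0, sizeof(addr));
		addr.nl_family = AF_NETLINK;
		addr.nl_pid = getpid();
		addr.nl_groups = SCSI_NL_GRP_FC_EVENTS;	/* FC async event group */
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("bind");
			return 1;
		}

		while ((len = recv(fd, buf, sizeof(buf), 0)) > 0)
			printf("received %d byte SCSI transport message\n", len);
		return 0;
	}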
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index ae24c85aaeea..5d023d44e5e7 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -8,6 +8,7 @@ struct scsi_cmnd;
8struct scsi_device; 8struct scsi_device;
9struct scsi_host_template; 9struct scsi_host_template;
10struct Scsi_Host; 10struct Scsi_Host;
11struct scsi_nl_hdr;
11 12
12 13
13/* 14/*
@@ -110,6 +111,16 @@ extern void __scsi_remove_device(struct scsi_device *);
110 111
111extern struct bus_type scsi_bus_type; 112extern struct bus_type scsi_bus_type;
112 113
114/* scsi_netlink.c */
115#ifdef CONFIG_SCSI_NETLINK
116extern void scsi_netlink_init(void);
117extern void scsi_netlink_exit(void);
118extern struct sock *scsi_nl_sock;
119#else
120static inline void scsi_netlink_init(void) {}
121static inline void scsi_netlink_exit(void) {}
122#endif
123
113/* 124/*
114 * internal scsi timeout functions: for use by mid-layer and transport 125 * internal scsi timeout functions: for use by mid-layer and transport
115 * classes. 126 * classes.
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 55200e4fdf11..524a5f7a5193 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -178,9 +178,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
178 178
179 seq_printf(s, "\n"); 179 seq_printf(s, "\n");
180 180
181 seq_printf(s, " Type: %s ", 181 seq_printf(s, " Type: %s ", scsi_device_type(sdev->type));
182 sdev->type < MAX_SCSI_DEVICE_CODE ?
183 scsi_device_types[(int) sdev->type] : "Unknown ");
184 seq_printf(s, " ANSI" 182 seq_printf(s, " ANSI"
185 " SCSI revision: %02x", (sdev->scsi_level - 1) ? 183 " SCSI revision: %02x", (sdev->scsi_level - 1) ?
186 sdev->scsi_level - 1 : 1); 184 sdev->scsi_level - 1 : 1);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1bd92b9b46d9..fd9e281c3bfe 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -134,59 +134,6 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
134} 134}
135 135
136/** 136/**
137 * print_inquiry - printk the inquiry information
138 * @inq_result: printk this SCSI INQUIRY
139 *
140 * Description:
141 * printk the vendor, model, and other information found in the
142 * INQUIRY data in @inq_result.
143 *
144 * Notes:
145 * Remove this, and replace with a hotplug event that logs any
146 * relevant information.
147 **/
148static void print_inquiry(unsigned char *inq_result)
149{
150 int i;
151
152 printk(KERN_NOTICE " Vendor: ");
153 for (i = 8; i < 16; i++)
154 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
155 printk("%c", inq_result[i]);
156 else
157 printk(" ");
158
159 printk(" Model: ");
160 for (i = 16; i < 32; i++)
161 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
162 printk("%c", inq_result[i]);
163 else
164 printk(" ");
165
166 printk(" Rev: ");
167 for (i = 32; i < 36; i++)
168 if (inq_result[i] >= 0x20 && i < inq_result[4] + 5)
169 printk("%c", inq_result[i]);
170 else
171 printk(" ");
172
173 printk("\n");
174
175 i = inq_result[0] & 0x1f;
176
177 printk(KERN_NOTICE " Type: %s ",
178 i <
179 MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
180 "Unknown ");
181 printk(" ANSI SCSI revision: %02x",
182 inq_result[2] & 0x07);
183 if ((inq_result[2] & 0x07) == 1 && (inq_result[3] & 0x0f) == 1)
184 printk(" CCS\n");
185 else
186 printk("\n");
187}
188
189/**
190 * scsi_alloc_sdev - allocate and setup a scsi_Device 137 * scsi_alloc_sdev - allocate and setup a scsi_Device
191 * 138 *
192 * Description: 139 * Description:
@@ -319,6 +266,18 @@ static struct scsi_target *__scsi_find_target(struct device *parent,
319 return found_starget; 266 return found_starget;
320} 267}
321 268
269/**
270 * scsi_alloc_target - allocate a new or find an existing target
271 * @parent: parent of the target (need not be a scsi host)
272 * @channel: target channel number (zero if no channels)
273 * @id: target id number
274 *
275 * Return an existing target if one exists, provided it hasn't already
276 * gone into STARGET_DEL state, otherwise allocate a new target.
277 *
278 * The target is returned with an incremented reference, so the caller
279 * is responsible for both reaping and doing a last put.
280 */
322static struct scsi_target *scsi_alloc_target(struct device *parent, 281static struct scsi_target *scsi_alloc_target(struct device *parent,
323 int channel, uint id) 282 int channel, uint id)
324{ 283{
@@ -384,14 +343,15 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
384 return NULL; 343 return NULL;
385 } 344 }
386 } 345 }
346 get_device(dev);
387 347
388 return starget; 348 return starget;
389 349
390 found: 350 found:
391 found_target->reap_ref++; 351 found_target->reap_ref++;
392 spin_unlock_irqrestore(shost->host_lock, flags); 352 spin_unlock_irqrestore(shost->host_lock, flags);
393 put_device(parent);
394 if (found_target->state != STARGET_DEL) { 353 if (found_target->state != STARGET_DEL) {
354 put_device(parent);
395 kfree(starget); 355 kfree(starget);
396 return found_target; 356 return found_target;
397 } 357 }
@@ -450,6 +410,32 @@ void scsi_target_reap(struct scsi_target *starget)
450} 410}
451 411
452/** 412/**
413 * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
414 * @s: INQUIRY result string to sanitize
415 * @len: length of the string
416 *
417 * Description:
418 * The SCSI spec says that INQUIRY vendor, product, and revision
419 * strings must consist entirely of graphic ASCII characters,
420 * padded on the right with spaces. Since not all devices obey
421 * this rule, we will replace non-graphic or non-ASCII characters
422 * with spaces. Exception: a NUL character is interpreted as a
423 * string terminator, so all the following characters are set to
424 * spaces.
425 **/
426static void sanitize_inquiry_string(unsigned char *s, int len)
427{
428 int terminated = 0;
429
430 for (; len > 0; (--len, ++s)) {
431 if (*s == 0)
432 terminated = 1;
433 if (terminated || *s < 0x20 || *s > 0x7e)
434 *s = ' ';
435 }
436}
437
438/**
453 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 439 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
454 * @sdev: scsi_device to probe 440 * @sdev: scsi_device to probe
455 * @inq_result: area to store the INQUIRY result 441 * @inq_result: area to store the INQUIRY result
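(Editor's illustration, not part of the patch: the effect of sanitize_inquiry_string() on a hypothetical 8-byte vendor field that contains a control character followed by an early NUL terminator.)

	#include <stdio.h>

	static void sanitize_inquiry_string(unsigned char *s, int len)
	{
		int terminated = 0;

		for (; len > 0; (--len, ++s)) {
			if (*s == 0)
				terminated = 1;
			if (terminated || *s < 0x20 || *s > 0x7e)
				*s = ' ';
		}
	}

	int main(void)
	{
		unsigned char vendor[8] = { 'A', 'C', 'M', 'E', 0x07, 0, 'X', 'Y' };

		sanitize_inquiry_string(vendor, 8);
		printf("\"%.8s\"\n", vendor);	/* prints "ACME    " */
		return 0;
	}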
@@ -463,7 +449,7 @@ void scsi_target_reap(struct scsi_target *starget)
463 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 449 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
464 * are copied to the scsi_device any flags value is stored in *@bflags. 450 * are copied to the scsi_device any flags value is stored in *@bflags.
465 **/ 451 **/
466static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, 452static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
467 int result_len, int *bflags) 453 int result_len, int *bflags)
468{ 454{
469 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 455 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -522,7 +508,11 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
522 } 508 }
523 509
524 if (result == 0) { 510 if (result == 0) {
525 response_len = (unsigned char) inq_result[4] + 5; 511 sanitize_inquiry_string(&inq_result[8], 8);
512 sanitize_inquiry_string(&inq_result[16], 16);
513 sanitize_inquiry_string(&inq_result[32], 4);
514
515 response_len = inq_result[4] + 5;
526 if (response_len > 255) 516 if (response_len > 255)
527 response_len = first_inquiry_len; /* sanity */ 517 response_len = first_inquiry_len; /* sanity */
528 518
@@ -628,7 +618,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
628 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 618 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
629 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 619 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
630 **/ 620 **/
631static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) 621static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
622 int *bflags)
632{ 623{
633 /* 624 /*
634 * XXX do not save the inquiry, since it can change underneath us, 625 * XXX do not save the inquiry, since it can change underneath us,
@@ -653,9 +644,8 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
653 if (*bflags & BLIST_ISROM) { 644 if (*bflags & BLIST_ISROM) {
654 /* 645 /*
655 * It would be better to modify sdev->type, and set 646 * It would be better to modify sdev->type, and set
656 * sdev->removable, but then the print_inquiry() output 647 * sdev->removable; this can now be done since
657 * would not show TYPE_ROM; if print_inquiry() is removed 648 * print_inquiry has gone away.
658 * the issue goes away.
659 */ 649 */
660 inq_result[0] = TYPE_ROM; 650 inq_result[0] = TYPE_ROM;
661 inq_result[1] |= 0x80; /* removable */ 651 inq_result[1] |= 0x80; /* removable */
@@ -684,8 +674,6 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
684 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); 674 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
685 } 675 }
686 676
687 print_inquiry(inq_result);
688
689 /* 677 /*
690 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 678 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
691 * spec says: The device server is capable of supporting the 679 * spec says: The device server is capable of supporting the
@@ -715,6 +703,12 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
715 if (inq_result[7] & 0x10) 703 if (inq_result[7] & 0x10)
716 sdev->sdtr = 1; 704 sdev->sdtr = 1;
717 705
706 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
707 "ANSI: %d%s\n", scsi_device_type(sdev->type),
708 sdev->vendor, sdev->model, sdev->rev,
709 sdev->inq_periph_qual, inq_result[2] & 0x07,
710 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
711
718 /* 712 /*
719 * End sysfs code. 713 * End sysfs code.
720 */ 714 */
@@ -943,11 +937,26 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
943 } 937 }
944 938
945 /* 939 /*
946 * Non-standard SCSI targets may set the PDT to 0x1f (unknown or 940 * Some targets may set slight variations of PQ and PDT to signal
947 * no device type) instead of using the Peripheral Qualifier to 941 * that no LUN is present, so don't add sdev in these cases.
948 * indicate that no LUN is present. For example, USB UFI does this. 942 * Two specific examples are:
943 * 1) NetApp targets: return PQ=1, PDT=0x1f
944 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
945 * in the UFI 1.0 spec (we cannot rely on reserved bits).
946 *
947 * References:
948 * 1) SCSI SPC-3, pp. 145-146
949 * PQ=1: "A peripheral device having the specified peripheral
950 * device type is not connected to this logical unit. However, the
951 * device server is capable of supporting the specified peripheral
952 * device type on this logical unit."
953 * PDT=0x1f: "Unknown or no device type"
954 * 2) USB UFI 1.0, p. 20
955 * PDT=00h Direct-access device (floppy)
956 * PDT=1Fh none (no FDD connected to the requested logical unit)
949 */ 957 */
950 if (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f) { 958 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
959 (result[0] & 0x1f) == 0x1f) {
951 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 960 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
952 "scsi scan: peripheral device type" 961 "scsi scan: peripheral device type"
953 " of 31, no device added\n")); 962 " of 31, no device added\n"));
@@ -1345,7 +1354,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1345 if (!starget) 1354 if (!starget)
1346 return ERR_PTR(-ENOMEM); 1355 return ERR_PTR(-ENOMEM);
1347 1356
1348 get_device(&starget->dev);
1349 mutex_lock(&shost->scan_mutex); 1357 mutex_lock(&shost->scan_mutex);
1350 if (scsi_host_scan_allowed(shost)) 1358 if (scsi_host_scan_allowed(shost))
1351 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1359 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
@@ -1404,7 +1412,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1404 if (!starget) 1412 if (!starget)
1405 return; 1413 return;
1406 1414
1407 get_device(&starget->dev);
1408 if (lun != SCAN_WILD_CARD) { 1415 if (lun != SCAN_WILD_CARD) {
1409 /* 1416 /*
1410 * Scan for a specific host/chan/id/lun. 1417 * Scan for a specific host/chan/id/lun.
@@ -1586,7 +1593,8 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1586 if (sdev) { 1593 if (sdev) {
1587 sdev->sdev_gendev.parent = get_device(&starget->dev); 1594 sdev->sdev_gendev.parent = get_device(&starget->dev);
1588 sdev->borken = 0; 1595 sdev->borken = 0;
1589 } 1596 } else
1597 scsi_target_reap(starget);
1590 put_device(&starget->dev); 1598 put_device(&starget->dev);
1591 out: 1599 out:
1592 mutex_unlock(&shost->scan_mutex); 1600 mutex_unlock(&shost->scan_mutex);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b03aa85108e5..38c215a78f69 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -32,6 +32,9 @@
32#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
35#include <linux/netlink.h>
36#include <net/netlink.h>
37#include <scsi/scsi_netlink_fc.h>
35#include "scsi_priv.h" 38#include "scsi_priv.h"
36 39
37static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 40static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
@@ -93,6 +96,29 @@ fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
93#define FC_PORTTYPE_MAX_NAMELEN 50 96#define FC_PORTTYPE_MAX_NAMELEN 50
94 97
95 98
99/* Convert fc_host_event_code values to ascii string name */
100static const struct {
101 enum fc_host_event_code value;
102 char *name;
103} fc_host_event_code_names[] = {
104 { FCH_EVT_LIP, "lip" },
105 { FCH_EVT_LINKUP, "link_up" },
106 { FCH_EVT_LINKDOWN, "link_down" },
107 { FCH_EVT_LIPRESET, "lip_reset" },
108 { FCH_EVT_RSCN, "rscn" },
109 { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
110 { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
111 { FCH_EVT_PORT_ONLINE, "port_online" },
112 { FCH_EVT_PORT_OFFLINE, "port_offline" },
113 { FCH_EVT_PORT_FABRIC, "port_fabric" },
114 { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
115 { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
116};
117fc_enum_name_search(host_event_code, fc_host_event_code,
118 fc_host_event_code_names)
119#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
120
121
96/* Convert fc_port_state values to ascii string name */ 122/* Convert fc_port_state values to ascii string name */
97static struct { 123static struct {
98 enum fc_port_state value; 124 enum fc_port_state value;
@@ -216,6 +242,7 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
216 242
217 243
218static void fc_timeout_deleted_rport(void *data); 244static void fc_timeout_deleted_rport(void *data);
245static void fc_timeout_fail_rport_io(void *data);
219static void fc_scsi_scan_rport(void *data); 246static void fc_scsi_scan_rport(void *data);
220 247
221/* 248/*
@@ -223,7 +250,7 @@ static void fc_scsi_scan_rport(void *data);
223 * Increase these values if you add attributes 250 * Increase these values if you add attributes
224 */ 251 */
225#define FC_STARGET_NUM_ATTRS 3 252#define FC_STARGET_NUM_ATTRS 3
226#define FC_RPORT_NUM_ATTRS 9 253#define FC_RPORT_NUM_ATTRS 10
227#define FC_HOST_NUM_ATTRS 17 254#define FC_HOST_NUM_ATTRS 17
228 255
229struct fc_internal { 256struct fc_internal {
@@ -301,8 +328,6 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
301 fc_host->supported_classes = FC_COS_UNSPECIFIED; 328 fc_host->supported_classes = FC_COS_UNSPECIFIED;
302 memset(fc_host->supported_fc4s, 0, 329 memset(fc_host->supported_fc4s, 0,
303 sizeof(fc_host->supported_fc4s)); 330 sizeof(fc_host->supported_fc4s));
304 memset(fc_host->symbolic_name, 0,
305 sizeof(fc_host->symbolic_name));
306 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; 331 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
307 fc_host->maxframe_size = -1; 332 fc_host->maxframe_size = -1;
308 memset(fc_host->serial_number, 0, 333 memset(fc_host->serial_number, 0,
@@ -315,6 +340,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
315 sizeof(fc_host->active_fc4s)); 340 sizeof(fc_host->active_fc4s));
316 fc_host->speed = FC_PORTSPEED_UNKNOWN; 341 fc_host->speed = FC_PORTSPEED_UNKNOWN;
317 fc_host->fabric_name = -1; 342 fc_host->fabric_name = -1;
343 memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
344 memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
318 345
319 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN; 346 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
320 347
@@ -377,10 +404,184 @@ MODULE_PARM_DESC(dev_loss_tmo,
377 " exceeded, the scsi target is removed. Value should be" 404 " exceeded, the scsi target is removed. Value should be"
378 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT."); 405 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT.");
379 406
407/**
408 * Netlink Infrastructure
409 **/
410
411static atomic_t fc_event_seq;
412
413/**
414 * fc_get_event_number - Obtain the next sequential FC event number
415 *
416 * Notes:
417 * We could have inline'd this, but it would have required fc_event_seq to
418 * be exposed. For now, live with the subroutine call.
419 * Atomic used to avoid lock/unlock...
420 **/
421u32
422fc_get_event_number(void)
423{
424 return atomic_add_return(1, &fc_event_seq);
425}
426EXPORT_SYMBOL(fc_get_event_number);
427
428
429/**
430 * fc_host_post_event - called to post an event on an fc_host.
431 *
432 * @shost: host the event occurred on
433 * @event_number: fc event number obtained from fc_get_event_number()
434 * @event_code: fc_host event being posted
435 * @event_data: 32bits of data for the event being posted
436 *
437 * Notes:
438 * This routine assumes no locks are held on entry.
439 **/
440void
441fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
442 enum fc_host_event_code event_code, u32 event_data)
443{
444 struct sk_buff *skb;
445 struct nlmsghdr *nlh;
446 struct fc_nl_event *event;
447 const char *name;
448 u32 len, skblen;
449 int err;
450
451 if (!scsi_nl_sock) {
452 err = -ENOENT;
453 goto send_fail;
454 }
455
456 len = FC_NL_MSGALIGN(sizeof(*event));
457 skblen = NLMSG_SPACE(len);
458
459 skb = alloc_skb(skblen, GFP_KERNEL);
460 if (!skb) {
461 err = -ENOBUFS;
462 goto send_fail;
463 }
464
465 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
466 skblen - sizeof(*nlh), 0);
467 if (!nlh) {
468 err = -ENOBUFS;
469 goto send_fail_skb;
470 }
471 event = NLMSG_DATA(nlh);
472
473 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
474 FC_NL_ASYNC_EVENT, len);
475 event->seconds = get_seconds();
476 event->vendor_id = 0;
477 event->host_no = shost->host_no;
478 event->event_datalen = sizeof(u32); /* bytes */
479 event->event_num = event_number;
480 event->event_code = event_code;
481 event->event_data = event_data;
482
483 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
484 GFP_KERNEL);
485 if (err && (err != -ESRCH)) /* filter no recipient errors */
486 /* nlmsg_multicast already kfree_skb'd */
487 goto send_fail;
488
489 return;
490
491send_fail_skb:
492 kfree_skb(skb);
493send_fail:
494 name = get_fc_host_event_code_name(event_code);
495 printk(KERN_WARNING
496 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
497 __FUNCTION__, shost->host_no,
498 (name) ? name : "<unknown>", event_data, err);
499 return;
500}
501EXPORT_SYMBOL(fc_host_post_event);
502
503
504/**
505 * fc_host_post_vendor_event - called to post a vendor unique event on
506 * an fc_host
507 *
508 * @shost: host the event occurred on
509 * @event_number: fc event number obtained from fc_get_event_number()
510 * @data_len: amount, in bytes, of vendor unique data
511 * @data_buf: pointer to vendor unique data
512 *
513 * Notes:
514 * This routine assumes no locks are held on entry.
515 **/
516void
517fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
518 u32 data_len, char * data_buf, u64 vendor_id)
519{
520 struct sk_buff *skb;
521 struct nlmsghdr *nlh;
522 struct fc_nl_event *event;
523 u32 len, skblen;
524 int err;
525
526 if (!scsi_nl_sock) {
527 err = -ENOENT;
528 goto send_vendor_fail;
529 }
530
531 len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
532 skblen = NLMSG_SPACE(len);
533
534 skb = alloc_skb(skblen, GFP_KERNEL);
535 if (!skb) {
536 err = -ENOBUFS;
537 goto send_vendor_fail;
538 }
539
540 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
541 skblen - sizeof(*nlh), 0);
542 if (!nlh) {
543 err = -ENOBUFS;
544 goto send_vendor_fail_skb;
545 }
546 event = NLMSG_DATA(nlh);
547
548 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
549 FC_NL_ASYNC_EVENT, len);
550 event->seconds = get_seconds();
551 event->vendor_id = vendor_id;
552 event->host_no = shost->host_no;
553 event->event_datalen = data_len; /* bytes */
554 event->event_num = event_number;
555 event->event_code = FCH_EVT_VENDOR_UNIQUE;
556 memcpy(&event->event_data, data_buf, data_len);
557
558 err = nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
559 GFP_KERNEL);
560 if (err && (err != -ESRCH)) /* filter no recipient errors */
561 /* nlmsg_multicast already kfree_skb'd */
562 goto send_vendor_fail;
563
564 return;
565
566send_vendor_fail_skb:
567 kfree_skb(skb);
568send_vendor_fail:
569 printk(KERN_WARNING
570 "%s: Dropped Event : host %d vendor_unique - err %d\n",
571 __FUNCTION__, shost->host_no, err);
572 return;
573}
574EXPORT_SYMBOL(fc_host_post_vendor_event);
575
576
380 577
381static __init int fc_transport_init(void) 578static __init int fc_transport_init(void)
382{ 579{
383 int error = transport_class_register(&fc_host_class); 580 int error;
581
582 atomic_set(&fc_event_seq, 0);
583
584 error = transport_class_register(&fc_host_class);
384 if (error) 585 if (error)
385 return error; 586 return error;
386 error = transport_class_register(&fc_rport_class); 587 error = transport_class_register(&fc_rport_class);
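(Editor's illustration, not part of the patch: how a fibre channel LLDD might use the two interfaces exported in the hunk above to post an asynchronous link-up event for its Scsi_Host; example_report_link_up() is a hypothetical driver routine.)

	#include <scsi/scsi_host.h>
	#include <scsi/scsi_transport_fc.h>

	static void example_report_link_up(struct Scsi_Host *shost)
	{
		u32 event_number = fc_get_event_number();

		/* event_data is 32 bits of driver-defined detail; 0 here */
		fc_host_post_event(shost, event_number, FCH_EVT_LINKUP, 0);
	}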
@@ -424,11 +625,14 @@ store_fc_rport_##field(struct class_device *cdev, const char *buf, \
424 struct fc_rport *rport = transport_class_to_rport(cdev); \ 625 struct fc_rport *rport = transport_class_to_rport(cdev); \
425 struct Scsi_Host *shost = rport_to_shost(rport); \ 626 struct Scsi_Host *shost = rport_to_shost(rport); \
426 struct fc_internal *i = to_fc_internal(shost->transportt); \ 627 struct fc_internal *i = to_fc_internal(shost->transportt); \
628 char *cp; \
427 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \ 629 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
428 (rport->port_state == FC_PORTSTATE_DELETED) || \ 630 (rport->port_state == FC_PORTSTATE_DELETED) || \
429 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \ 631 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
430 return -EBUSY; \ 632 return -EBUSY; \
431 val = simple_strtoul(buf, NULL, 0); \ 633 val = simple_strtoul(buf, &cp, 0); \
634 if (*cp && (*cp != '\n')) \
635 return -EINVAL; \
432 i->f->set_rport_##field(rport, val); \ 636 i->f->set_rport_##field(rport, val); \
433 return count; \ 637 return count; \
434} 638}
@@ -510,6 +714,13 @@ static FC_CLASS_DEVICE_ATTR(rport, title, S_IRUGO, \
510 if (i->f->show_rport_##field) \ 714 if (i->f->show_rport_##field) \
511 count++ 715 count++
512 716
717#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
718{ \
719 i->private_rport_attrs[count] = class_device_attr_rport_##field; \
720 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
721 count++; \
722}
723
513 724
514/* The FC Transport Remote Port Attributes: */ 725/* The FC Transport Remote Port Attributes: */
515 726
@@ -542,12 +753,14 @@ store_fc_rport_dev_loss_tmo(struct class_device *cdev, const char *buf,
542 struct fc_rport *rport = transport_class_to_rport(cdev); 753 struct fc_rport *rport = transport_class_to_rport(cdev);
543 struct Scsi_Host *shost = rport_to_shost(rport); 754 struct Scsi_Host *shost = rport_to_shost(rport);
544 struct fc_internal *i = to_fc_internal(shost->transportt); 755 struct fc_internal *i = to_fc_internal(shost->transportt);
756 char *cp;
545 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || 757 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
546 (rport->port_state == FC_PORTSTATE_DELETED) || 758 (rport->port_state == FC_PORTSTATE_DELETED) ||
547 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) 759 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
548 return -EBUSY; 760 return -EBUSY;
549 val = simple_strtoul(buf, NULL, 0); 761 val = simple_strtoul(buf, &cp, 0);
550 if ((val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)) 762 if ((*cp && (*cp != '\n')) ||
763 (val < 0) || (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
551 return -EINVAL; 764 return -EINVAL;
552 i->f->set_rport_dev_loss_tmo(rport, val); 765 i->f->set_rport_dev_loss_tmo(rport, val);
553 return count; 766 return count;
@@ -597,6 +810,44 @@ static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
597fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 810fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
598fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20); 811fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
599 812
813/*
814 * fast_io_fail_tmo attribute
815 */
816static ssize_t
817show_fc_rport_fast_io_fail_tmo (struct class_device *cdev, char *buf)
818{
819 struct fc_rport *rport = transport_class_to_rport(cdev);
820
821 if (rport->fast_io_fail_tmo == -1)
822 return snprintf(buf, 5, "off\n");
823 return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
824}
825
826static ssize_t
827store_fc_rport_fast_io_fail_tmo(struct class_device *cdev, const char *buf,
828 size_t count)
829{
830 int val;
831 char *cp;
832 struct fc_rport *rport = transport_class_to_rport(cdev);
833
834 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
835 (rport->port_state == FC_PORTSTATE_DELETED) ||
836 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
837 return -EBUSY;
838 if (strncmp(buf, "off", 3) == 0)
839 rport->fast_io_fail_tmo = -1;
840 else {
841 val = simple_strtoul(buf, &cp, 0);
842 if ((*cp && (*cp != '\n')) ||
843 (val < 0) || (val >= rport->dev_loss_tmo))
844 return -EINVAL;
845 rport->fast_io_fail_tmo = val;
846 }
847 return count;
848}
849static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
850 show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
600 851
601 852
602/* 853/*
@@ -682,12 +933,34 @@ store_fc_host_##field(struct class_device *cdev, const char *buf, \
682 int val; \ 933 int val; \
683 struct Scsi_Host *shost = transport_class_to_shost(cdev); \ 934 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
684 struct fc_internal *i = to_fc_internal(shost->transportt); \ 935 struct fc_internal *i = to_fc_internal(shost->transportt); \
936 char *cp; \
685 \ 937 \
686 val = simple_strtoul(buf, NULL, 0); \ 938 val = simple_strtoul(buf, &cp, 0); \
939 if (*cp && (*cp != '\n')) \
940 return -EINVAL; \
687 i->f->set_host_##field(shost, val); \ 941 i->f->set_host_##field(shost, val); \
688 return count; \ 942 return count; \
689} 943}
690 944
945#define fc_host_store_str_function(field, slen) \
946static ssize_t \
947store_fc_host_##field(struct class_device *cdev, const char *buf, \
948 size_t count) \
949{ \
950 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
951 struct fc_internal *i = to_fc_internal(shost->transportt); \
952 unsigned int cnt=count; \
953 \
954 /* count may include a LF at end of string */ \
955 if (buf[cnt-1] == '\n') \
956 cnt--; \
957 if (cnt > ((slen) - 1)) \
958 return -EINVAL; \
959 memcpy(fc_host_##field(shost), buf, cnt); \
960 i->f->set_host_##field(shost); \
961 return count; \
962}
963
691#define fc_host_rd_attr(field, format_string, sz) \ 964#define fc_host_rd_attr(field, format_string, sz) \
692 fc_host_show_function(field, format_string, sz, ) \ 965 fc_host_show_function(field, format_string, sz, ) \
693static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \ 966static FC_CLASS_DEVICE_ATTR(host, field, S_IRUGO, \
@@ -815,7 +1088,6 @@ fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
815fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long); 1088fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
816fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, 1089fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
817 unsigned long long); 1090 unsigned long long);
818fc_private_host_rd_attr(symbolic_name, "%s\n", (FC_SYMBOLIC_NAME_SIZE +1));
819fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); 1091fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
820fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); 1092fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
821 1093
@@ -858,6 +1130,13 @@ fc_host_rd_attr(port_id, "0x%06x\n", 20);
858fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN); 1130fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
859fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN); 1131fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
860fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); 1132fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1133fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1134
1135fc_private_host_show_function(system_hostname, "%s\n",
1136 FC_SYMBOLIC_NAME_SIZE + 1, )
1137fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1138static FC_CLASS_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1139 show_fc_host_system_hostname, store_fc_host_system_hostname);
861 1140
862 1141
863/* Private Host Attributes */ 1142/* Private Host Attributes */
@@ -1223,7 +1502,6 @@ fc_attach_transport(struct fc_function_template *ft)
1223 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name); 1502 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
1224 SETUP_HOST_ATTRIBUTE_RD(supported_classes); 1503 SETUP_HOST_ATTRIBUTE_RD(supported_classes);
1225 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); 1504 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
1226 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
1227 SETUP_HOST_ATTRIBUTE_RD(supported_speeds); 1505 SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
1228 SETUP_HOST_ATTRIBUTE_RD(maxframe_size); 1506 SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
1229 SETUP_HOST_ATTRIBUTE_RD(serial_number); 1507 SETUP_HOST_ATTRIBUTE_RD(serial_number);
@@ -1234,6 +1512,8 @@ fc_attach_transport(struct fc_function_template *ft)
1234 SETUP_HOST_ATTRIBUTE_RD(active_fc4s); 1512 SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
1235 SETUP_HOST_ATTRIBUTE_RD(speed); 1513 SETUP_HOST_ATTRIBUTE_RD(speed);
1236 SETUP_HOST_ATTRIBUTE_RD(fabric_name); 1514 SETUP_HOST_ATTRIBUTE_RD(fabric_name);
1515 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
1516 SETUP_HOST_ATTRIBUTE_RW(system_hostname);
1237 1517
1238 /* Transport-managed attributes */ 1518 /* Transport-managed attributes */
1239 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 1519 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
@@ -1257,6 +1537,8 @@ fc_attach_transport(struct fc_function_template *ft)
1257 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles); 1537 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
1258 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state); 1538 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
1259 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id); 1539 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
1540 if (ft->terminate_rport_io)
1541 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
1260 1542
1261 BUG_ON(count > FC_RPORT_NUM_ATTRS); 1543 BUG_ON(count > FC_RPORT_NUM_ATTRS);
1262 1544
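(Editor's illustration, not part of the patch: as the conditional above shows, the fast_io_fail_tmo attribute is only created when the LLDD supplies a terminate_rport_io handler in its fc_function_template. A sketch with hypothetical driver names.)

	#include <scsi/scsi_transport_fc.h>

	static void example_terminate_rport_io(struct fc_rport *rport)
	{
		/* abort or fail all I/O outstanding to this remote port */
	}

	static void example_dev_loss_tmo_callbk(struct fc_rport *rport)
	{
		/* release driver resources tied to the remote port */
	}

	static struct fc_function_template example_fc_functions = {
		/* ... the usual show_/get_/set_ entries ... */
		.dev_loss_tmo_callbk	= example_dev_loss_tmo_callbk,
		.terminate_rport_io	= example_terminate_rport_io,
	};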
@@ -1328,7 +1610,7 @@ fc_flush_work(struct Scsi_Host *shost)
1328 * @delay: jiffies to delay the work queuing 1610 * @delay: jiffies to delay the work queuing
1329 * 1611 *
1330 * Return value: 1612 * Return value:
1331 * 0 on success / != 0 for error 1613 * 1 on success / 0 already queued / < 0 for error
1332 **/ 1614 **/
1333static int 1615static int
1334fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, 1616fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
@@ -1343,6 +1625,9 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
1343 return -EINVAL; 1625 return -EINVAL;
1344 } 1626 }
1345 1627
1628 if (delay == 0)
1629 return queue_work(fc_host_devloss_work_q(shost), work);
1630
1346 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); 1631 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
1347} 1632}
1348 1633
@@ -1435,10 +1720,23 @@ fc_starget_delete(void *data)
1435 struct fc_rport *rport = (struct fc_rport *)data; 1720 struct fc_rport *rport = (struct fc_rport *)data;
1436 struct Scsi_Host *shost = rport_to_shost(rport); 1721 struct Scsi_Host *shost = rport_to_shost(rport);
1437 unsigned long flags; 1722 unsigned long flags;
1723 struct fc_internal *i = to_fc_internal(shost->transportt);
1724
1725 /*
1726 * Involve the LLDD if possible. All io on the rport is to
1727 * be terminated, either as part of the dev_loss_tmo callback
1728 * processing, or via the terminate_rport_io function.
1729 */
1730 if (i->f->dev_loss_tmo_callbk)
1731 i->f->dev_loss_tmo_callbk(rport);
1732 else if (i->f->terminate_rport_io)
1733 i->f->terminate_rport_io(rport);
1438 1734
1439 spin_lock_irqsave(shost->host_lock, flags); 1735 spin_lock_irqsave(shost->host_lock, flags);
1440 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { 1736 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
1441 spin_unlock_irqrestore(shost->host_lock, flags); 1737 spin_unlock_irqrestore(shost->host_lock, flags);
1738 if (!cancel_delayed_work(&rport->fail_io_work))
1739 fc_flush_devloss(shost);
1442 if (!cancel_delayed_work(&rport->dev_loss_work)) 1740 if (!cancel_delayed_work(&rport->dev_loss_work))
1443 fc_flush_devloss(shost); 1741 fc_flush_devloss(shost);
1444 spin_lock_irqsave(shost->host_lock, flags); 1742 spin_lock_irqsave(shost->host_lock, flags);
@@ -1461,10 +1759,7 @@ fc_rport_final_delete(void *data)
1461 struct fc_rport *rport = (struct fc_rport *)data; 1759 struct fc_rport *rport = (struct fc_rport *)data;
1462 struct device *dev = &rport->dev; 1760 struct device *dev = &rport->dev;
1463 struct Scsi_Host *shost = rport_to_shost(rport); 1761 struct Scsi_Host *shost = rport_to_shost(rport);
1464 1762 struct fc_internal *i = to_fc_internal(shost->transportt);
1465 /* Delete SCSI target and sdevs */
1466 if (rport->scsi_target_id != -1)
1467 fc_starget_delete(data);
1468 1763
1469 /* 1764 /*
1470 * if a scan is pending, flush the SCSI Host work_q so that 1765 * if a scan is pending, flush the SCSI Host work_q so that
@@ -1473,6 +1768,14 @@ fc_rport_final_delete(void *data)
1473 if (rport->flags & FC_RPORT_SCAN_PENDING) 1768 if (rport->flags & FC_RPORT_SCAN_PENDING)
1474 scsi_flush_work(shost); 1769 scsi_flush_work(shost);
1475 1770
1771 /* Delete SCSI target and sdevs */
1772 if (rport->scsi_target_id != -1)
1773 fc_starget_delete(data);
1774 else if (i->f->dev_loss_tmo_callbk)
1775 i->f->dev_loss_tmo_callbk(rport);
1776 else if (i->f->terminate_rport_io)
1777 i->f->terminate_rport_io(rport);
1778
1476 transport_remove_device(dev); 1779 transport_remove_device(dev);
1477 device_del(dev); 1780 device_del(dev);
1478 transport_destroy_device(dev); 1781 transport_destroy_device(dev);
@@ -1524,8 +1827,10 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1524 if (fci->f->dd_fcrport_size) 1827 if (fci->f->dd_fcrport_size)
1525 rport->dd_data = &rport[1]; 1828 rport->dd_data = &rport[1];
1526 rport->channel = channel; 1829 rport->channel = channel;
1830 rport->fast_io_fail_tmo = -1;
1527 1831
1528 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); 1832 INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
1833 INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
1529 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); 1834 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
1530 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); 1835 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
1531 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); 1836 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
@@ -1689,11 +1994,13 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
1689 /* restart the target */ 1994 /* restart the target */
1690 1995
1691 /* 1996 /*
1692 * Stop the target timer first. Take no action 1997 * Stop the target timers first. Take no action
1693 * on the del_timer failure as the state 1998 * on the del_timer failure as the state
1694 * machine state change will validate the 1999 * machine state change will validate the
1695 * transaction. 2000 * transaction.
1696 */ 2001 */
2002 if (!cancel_delayed_work(&rport->fail_io_work))
2003 fc_flush_devloss(shost);
1697 if (!cancel_delayed_work(work)) 2004 if (!cancel_delayed_work(work))
1698 fc_flush_devloss(shost); 2005 fc_flush_devloss(shost);
1699 2006
@@ -1837,6 +2144,7 @@ void
1837fc_remote_port_delete(struct fc_rport *rport) 2144fc_remote_port_delete(struct fc_rport *rport)
1838{ 2145{
1839 struct Scsi_Host *shost = rport_to_shost(rport); 2146 struct Scsi_Host *shost = rport_to_shost(rport);
2147 struct fc_internal *i = to_fc_internal(shost->transportt);
1840 int timeout = rport->dev_loss_tmo; 2148 int timeout = rport->dev_loss_tmo;
1841 unsigned long flags; 2149 unsigned long flags;
1842 2150
@@ -1867,6 +2175,12 @@ fc_remote_port_delete(struct fc_rport *rport)
1867 2175
1868 scsi_target_block(&rport->dev); 2176 scsi_target_block(&rport->dev);
1869 2177
2178 /* see if we need to kill io faster than waiting for device loss */
2179 if ((rport->fast_io_fail_tmo != -1) &&
2180 (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
2181 fc_queue_devloss_work(shost, &rport->fail_io_work,
2182 rport->fast_io_fail_tmo * HZ);
2183
1870 /* cap the length the devices can be blocked until they are deleted */ 2184 /* cap the length the devices can be blocked until they are deleted */
1871 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ); 2185 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
1872} 2186}
@@ -1926,6 +2240,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
1926 * machine state change will validate the 2240 * machine state change will validate the
1927 * transaction. 2241 * transaction.
1928 */ 2242 */
2243 if (!cancel_delayed_work(&rport->fail_io_work))
2244 fc_flush_devloss(shost);
1929 if (!cancel_delayed_work(&rport->dev_loss_work)) 2245 if (!cancel_delayed_work(&rport->dev_loss_work))
1930 fc_flush_devloss(shost); 2246 fc_flush_devloss(shost);
1931 2247
@@ -2047,6 +2363,28 @@ fc_timeout_deleted_rport(void *data)
2047} 2363}
2048 2364
2049/** 2365/**
2366 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
2367 * disconnected SCSI target.
2368 *
2369 * @data: rport to terminate io on.
2370 *
2371 * Notes: Only requests that the outstanding io on the rport be failed;
2372 * it does not wait for that io to be flushed prior to returning.
2373 **/
2374static void
2375fc_timeout_fail_rport_io(void *data)
2376{
2377 struct fc_rport *rport = (struct fc_rport *)data;
2378 struct Scsi_Host *shost = rport_to_shost(rport);
2379 struct fc_internal *i = to_fc_internal(shost->transportt);
2380
2381 if (rport->port_state != FC_PORTSTATE_BLOCKED)
2382 return;
2383
2384 i->f->terminate_rport_io(rport);
2385}
2386
2387/**
2050 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 2388 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
2051 * 2389 *
2052 * @data: remote port to be scanned. 2390 * @data: remote port to be scanned.
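The fc transport hunks above add a fast_io_fail_tmo path: when a remote port is deleted and fast_io_fail_tmo is set below dev_loss_tmo, fail_io_work fires first and asks the LLDD to terminate outstanding I/O through terminate_rport_io(), while dev_loss_work still tears the target down later. A minimal sketch of how an LLDD might opt in, assuming the usual fc_function_template fields; lld_terminate_rport_io() is a hypothetical driver function, not part of this patch:

/* Sketch only: an LLDD providing the hook used by fc_timeout_fail_rport_io() */
static void lld_terminate_rport_io(struct fc_rport *rport)
{
	/* abort or fail every command still outstanding on this rport */
}

static struct fc_function_template lld_fc_functions = {
	.terminate_rport_io	= lld_terminate_rport_io,
	/* ... remaining transport template fields ... */
};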
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 2ecd14188574..7b0019cccce3 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,7 +34,7 @@
34#define ISCSI_SESSION_ATTRS 11 34#define ISCSI_SESSION_ATTRS 11
35#define ISCSI_CONN_ATTRS 11 35#define ISCSI_CONN_ATTRS 11
36#define ISCSI_HOST_ATTRS 0 36#define ISCSI_HOST_ATTRS 0
37#define ISCSI_TRANSPORT_VERSION "1.1-646" 37#define ISCSI_TRANSPORT_VERSION "2.0-685"
38 38
39struct iscsi_internal { 39struct iscsi_internal {
40 int daemon_pid; 40 int daemon_pid;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 5a625c3fddae..b5b0c2cba96b 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -77,6 +77,24 @@ get_sas_##title##_names(u32 table_key, char *buf) \
77 return len; \ 77 return len; \
78} 78}
79 79
80#define sas_bitfield_name_set(title, table) \
81static ssize_t \
82set_sas_##title##_names(u32 *table_key, const char *buf) \
83{ \
84 ssize_t len = 0; \
85 int i; \
86 \
87 for (i = 0; i < ARRAY_SIZE(table); i++) { \
88 len = strlen(table[i].name); \
89 if (strncmp(buf, table[i].name, len) == 0 && \
90 (buf[len] == '\n' || buf[len] == '\0')) { \
91 *table_key = table[i].value; \
92 return 0; \
93 } \
94 } \
95 return -EINVAL; \
96}
97
80#define sas_bitfield_name_search(title, table) \ 98#define sas_bitfield_name_search(title, table) \
81static ssize_t \ 99static ssize_t \
82get_sas_##title##_names(u32 table_key, char *buf) \ 100get_sas_##title##_names(u32 table_key, char *buf) \
@@ -131,7 +149,7 @@ static struct {
131 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, 149 { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
132}; 150};
133sas_bitfield_name_search(linkspeed, sas_linkspeed_names) 151sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
134 152sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
135 153
136/* 154/*
137 * SAS host attributes 155 * SAS host attributes
@@ -253,10 +271,39 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \
253 return get_sas_linkspeed_names(phy->field, buf); \ 271 return get_sas_linkspeed_names(phy->field, buf); \
254} 272}
255 273
274/* Fudge to tell if we're minimum or maximum */
275#define sas_phy_store_linkspeed(field) \
276static ssize_t \
277store_sas_phy_##field(struct class_device *cdev, const char *buf, \
278 size_t count) \
279{ \
280 struct sas_phy *phy = transport_class_to_phy(cdev); \
281 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \
282 struct sas_internal *i = to_sas_internal(shost->transportt); \
283 u32 value; \
284 struct sas_phy_linkrates rates = {0}; \
285 int error; \
286 \
287 error = set_sas_linkspeed_names(&value, buf); \
288 if (error) \
289 return error; \
290 rates.field = value; \
291 error = i->f->set_phy_speed(phy, &rates); \
292 \
293 return error ? error : count; \
294}
295
296#define sas_phy_linkspeed_rw_attr(field) \
297 sas_phy_show_linkspeed(field) \
298 sas_phy_store_linkspeed(field) \
299static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \
300 store_sas_phy_##field)
301
256#define sas_phy_linkspeed_attr(field) \ 302#define sas_phy_linkspeed_attr(field) \
257 sas_phy_show_linkspeed(field) \ 303 sas_phy_show_linkspeed(field) \
258static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) 304static CLASS_DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL)
259 305
306
260#define sas_phy_show_linkerror(field) \ 307#define sas_phy_show_linkerror(field) \
261static ssize_t \ 308static ssize_t \
262show_sas_phy_##field(struct class_device *cdev, char *buf) \ 309show_sas_phy_##field(struct class_device *cdev, char *buf) \
@@ -266,9 +313,6 @@ show_sas_phy_##field(struct class_device *cdev, char *buf) \
266 struct sas_internal *i = to_sas_internal(shost->transportt); \ 313 struct sas_internal *i = to_sas_internal(shost->transportt); \
267 int error; \ 314 int error; \
268 \ 315 \
269 if (!phy->local_attached) \
270 return -EINVAL; \
271 \
272 error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \ 316 error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \
273 if (error) \ 317 if (error) \
274 return error; \ 318 return error; \
@@ -299,9 +343,6 @@ static ssize_t do_sas_phy_reset(struct class_device *cdev,
299 struct sas_internal *i = to_sas_internal(shost->transportt); 343 struct sas_internal *i = to_sas_internal(shost->transportt);
300 int error; 344 int error;
301 345
302 if (!phy->local_attached)
303 return -EINVAL;
304
305 error = i->f->phy_reset(phy, hard_reset); 346 error = i->f->phy_reset(phy, hard_reset);
306 if (error) 347 if (error)
307 return error; 348 return error;
@@ -332,9 +373,9 @@ sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
332//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int); 373//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
333sas_phy_linkspeed_attr(negotiated_linkrate); 374sas_phy_linkspeed_attr(negotiated_linkrate);
334sas_phy_linkspeed_attr(minimum_linkrate_hw); 375sas_phy_linkspeed_attr(minimum_linkrate_hw);
335sas_phy_linkspeed_attr(minimum_linkrate); 376sas_phy_linkspeed_rw_attr(minimum_linkrate);
336sas_phy_linkspeed_attr(maximum_linkrate_hw); 377sas_phy_linkspeed_attr(maximum_linkrate_hw);
337sas_phy_linkspeed_attr(maximum_linkrate); 378sas_phy_linkspeed_rw_attr(maximum_linkrate);
338sas_phy_linkerror_attr(invalid_dword_count); 379sas_phy_linkerror_attr(invalid_dword_count);
339sas_phy_linkerror_attr(running_disparity_error_count); 380sas_phy_linkerror_attr(running_disparity_error_count);
340sas_phy_linkerror_attr(loss_of_dword_sync_count); 381sas_phy_linkerror_attr(loss_of_dword_sync_count);
@@ -849,7 +890,7 @@ show_sas_rphy_enclosure_identifier(struct class_device *cdev, char *buf)
849 * Only devices behind an expander are supported, because the 890 * Only devices behind an expander are supported, because the
850 * enclosure identifier is a SMP feature. 891 * enclosure identifier is a SMP feature.
851 */ 892 */
852 if (phy->local_attached) 893 if (scsi_is_sas_phy_local(phy))
853 return -EINVAL; 894 return -EINVAL;
854 895
855 error = i->f->get_enclosure_identifier(rphy, &identifier); 896 error = i->f->get_enclosure_identifier(rphy, &identifier);
@@ -870,7 +911,7 @@ show_sas_rphy_bay_identifier(struct class_device *cdev, char *buf)
870 struct sas_internal *i = to_sas_internal(shost->transportt); 911 struct sas_internal *i = to_sas_internal(shost->transportt);
871 int val; 912 int val;
872 913
873 if (phy->local_attached) 914 if (scsi_is_sas_phy_local(phy))
874 return -EINVAL; 915 return -EINVAL;
875 916
876 val = i->f->get_bay_identifier(rphy); 917 val = i->f->get_bay_identifier(rphy);
@@ -1316,13 +1357,23 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1316 * Setup / Teardown code 1357 * Setup / Teardown code
1317 */ 1358 */
1318 1359
1319#define SETUP_TEMPLATE(attrb, field, perm, test) \ 1360#define SETUP_TEMPLATE(attrb, field, perm, test) \
1320 i->private_##attrb[count] = class_device_attr_##field; \ 1361 i->private_##attrb[count] = class_device_attr_##field; \
1321 i->private_##attrb[count].attr.mode = perm; \ 1362 i->private_##attrb[count].attr.mode = perm; \
1322 i->attrb[count] = &i->private_##attrb[count]; \ 1363 i->attrb[count] = &i->private_##attrb[count]; \
1323 if (test) \ 1364 if (test) \
1324 count++ 1365 count++
1325 1366
1367#define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \
1368 i->private_##attrb[count] = class_device_attr_##field; \
1369 i->private_##attrb[count].attr.mode = perm; \
1370 if (ro_test) { \
1371 i->private_##attrb[count].attr.mode = ro_perm; \
1372 i->private_##attrb[count].store = NULL; \
1373 } \
1374 i->attrb[count] = &i->private_##attrb[count]; \
1375 if (test) \
1376 count++
1326 1377
1327#define SETUP_RPORT_ATTRIBUTE(field) \ 1378#define SETUP_RPORT_ATTRIBUTE(field) \
1328 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) 1379 SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1)
@@ -1333,6 +1384,10 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
1333#define SETUP_PHY_ATTRIBUTE(field) \ 1384#define SETUP_PHY_ATTRIBUTE(field) \
1334 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) 1385 SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1)
1335 1386
1387#define SETUP_PHY_ATTRIBUTE_RW(field) \
1388 SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \
1389 !i->f->set_phy_speed, S_IRUGO)
1390
1336#define SETUP_PORT_ATTRIBUTE(field) \ 1391#define SETUP_PORT_ATTRIBUTE(field) \
1337 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) 1392 SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1)
1338 1393
@@ -1413,9 +1468,9 @@ sas_attach_transport(struct sas_function_template *ft)
1413 //SETUP_PHY_ATTRIBUTE(port_identifier); 1468 //SETUP_PHY_ATTRIBUTE(port_identifier);
1414 SETUP_PHY_ATTRIBUTE(negotiated_linkrate); 1469 SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
1415 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); 1470 SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
1416 SETUP_PHY_ATTRIBUTE(minimum_linkrate); 1471 SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
1417 SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw); 1472 SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw);
1418 SETUP_PHY_ATTRIBUTE(maximum_linkrate); 1473 SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate);
1419 1474
1420 SETUP_PHY_ATTRIBUTE(invalid_dword_count); 1475 SETUP_PHY_ATTRIBUTE(invalid_dword_count);
1421 SETUP_PHY_ATTRIBUTE(running_disparity_error_count); 1476 SETUP_PHY_ATTRIBUTE(running_disparity_error_count);
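The writable linkrate attributes above rest on the new sas_bitfield_name_set() macro, which generates a parser from the same name table already used for display. For the linkspeed table it expands to roughly the following (a sketch of the expansion, not additional code in the patch):

/* Approximate expansion of sas_bitfield_name_set(linkspeed, sas_linkspeed_names) */
static ssize_t
set_sas_linkspeed_names(u32 *table_key, const char *buf)
{
	ssize_t len;
	int i;

	for (i = 0; i < ARRAY_SIZE(sas_linkspeed_names); i++) {
		len = strlen(sas_linkspeed_names[i].name);
		if (strncmp(buf, sas_linkspeed_names[i].name, len) == 0 &&
		    (buf[len] == '\n' || buf[len] == '\0')) {
			*table_key = sas_linkspeed_names[i].value;
			return 0;
		}
	}
	return -EINVAL;
}

So writing "6.0 Gbit" to minimum_linkrate or maximum_linkrate becomes the matching SAS_LINK_RATE_* value, which store_sas_phy_##field then hands to the LLDD through i->f->set_phy_speed(); when the LLDD provides no set_phy_speed, SETUP_PHY_ATTRIBUTE_RW falls back to a read-only attribute.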
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 29a9a53cdd1a..9f070f0d0f2b 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -47,6 +47,7 @@
47 47
48/* Private data accessors (keep these out of the header file) */ 48/* Private data accessors (keep these out of the header file) */
49#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending) 49#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending)
50#define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress)
50#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) 51#define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex)
51 52
52struct spi_internal { 53struct spi_internal {
@@ -240,6 +241,7 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
240 spi_pcomp_en(starget) = 0; 241 spi_pcomp_en(starget) = 0;
241 spi_hold_mcs(starget) = 0; 242 spi_hold_mcs(starget) = 0;
242 spi_dv_pending(starget) = 0; 243 spi_dv_pending(starget) = 0;
244 spi_dv_in_progress(starget) = 0;
243 spi_initial_dv(starget) = 0; 245 spi_initial_dv(starget) = 0;
244 mutex_init(&spi_dv_mutex(starget)); 246 mutex_init(&spi_dv_mutex(starget));
245 247
@@ -830,28 +832,37 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
830 DV_SET(period, spi_min_period(starget)); 832 DV_SET(period, spi_min_period(starget));
831 /* try QAS requests; this should be harmless to set if the 833 /* try QAS requests; this should be harmless to set if the
832 * target supports it */ 834 * target supports it */
833 if (scsi_device_qas(sdev)) 835 if (scsi_device_qas(sdev)) {
834 DV_SET(qas, 1); 836 DV_SET(qas, 1);
835 /* Also try IU transfers */ 837 } else {
836 if (scsi_device_ius(sdev)) 838 DV_SET(qas, 0);
839 }
840
841 if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
842 /* This is U320 (or U640). Set IU transfers */
837 DV_SET(iu, 1); 843 DV_SET(iu, 1);
838 if (spi_min_period(starget) < 9) { 844 /* Then set the optional parameters */
839 /* This u320 (or u640). Ignore the coupled parameters
840 * like DT and IU, but set the optional ones */
841 DV_SET(rd_strm, 1); 845 DV_SET(rd_strm, 1);
842 DV_SET(wr_flow, 1); 846 DV_SET(wr_flow, 1);
843 DV_SET(rti, 1); 847 DV_SET(rti, 1);
844 if (spi_min_period(starget) == 8) 848 if (spi_min_period(starget) == 8)
845 DV_SET(pcomp_en, 1); 849 DV_SET(pcomp_en, 1);
850 } else {
851 DV_SET(iu, 0);
846 } 852 }
853
847 /* now that we've done all this, actually check the bus 854 /* now that we've done all this, actually check the bus
848 * signal type (if known). Some devices are stupid on 855 * signal type (if known). Some devices are stupid on
849 * a SE bus and still claim they can try LVD only settings */ 856 * a SE bus and still claim they can try LVD only settings */
850 if (i->f->get_signalling) 857 if (i->f->get_signalling)
851 i->f->get_signalling(shost); 858 i->f->get_signalling(shost);
852 if (spi_signalling(shost) == SPI_SIGNAL_SE || 859 if (spi_signalling(shost) == SPI_SIGNAL_SE ||
853 spi_signalling(shost) == SPI_SIGNAL_HVD) 860 spi_signalling(shost) == SPI_SIGNAL_HVD ||
861 !scsi_device_dt(sdev)) {
854 DV_SET(dt, 0); 862 DV_SET(dt, 0);
863 } else {
864 DV_SET(dt, 1);
865 }
855 /* Do the read only INQUIRY tests */ 866 /* Do the read only INQUIRY tests */
856 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 867 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
857 spi_dv_device_compare_inquiry); 868 spi_dv_device_compare_inquiry);
@@ -907,6 +918,10 @@ spi_dv_device(struct scsi_device *sdev)
907 if (unlikely(scsi_device_get(sdev))) 918 if (unlikely(scsi_device_get(sdev)))
908 return; 919 return;
909 920
921 if (unlikely(spi_dv_in_progress(starget)))
922 return;
923 spi_dv_in_progress(starget) = 1;
924
910 buffer = kzalloc(len, GFP_KERNEL); 925 buffer = kzalloc(len, GFP_KERNEL);
911 926
912 if (unlikely(!buffer)) 927 if (unlikely(!buffer))
@@ -938,6 +953,7 @@ spi_dv_device(struct scsi_device *sdev)
938 out_free: 953 out_free:
939 kfree(buffer); 954 kfree(buffer);
940 out_put: 955 out_put:
956 spi_dv_in_progress(starget) = 0;
941 scsi_device_put(sdev); 957 scsi_device_put(sdev);
942} 958}
943EXPORT_SYMBOL(spi_dv_device); 959EXPORT_SYMBOL(spi_dv_device);
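Two things change in the spi transport above: spi_dv_in_progress() guards against re-entering domain validation on the same target, and the DV parameter selection now sets QAS, IU and DT explicitly on both branches instead of leaving them at whatever a previous run negotiated. Condensed, the new selection reads roughly as below, where spi_min_period(starget) < 9 identifies U320/U640-capable targets (a sketch of the hunk above, not a drop-in replacement):

/* Condensed view of the reworked parameter selection in spi_dv_device_internal() */
DV_SET(qas, scsi_device_qas(sdev) ? 1 : 0);

if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
	DV_SET(iu, 1);			/* U320/U640: IU plus the optional features */
	DV_SET(rd_strm, 1);
	DV_SET(wr_flow, 1);
	DV_SET(rti, 1);
	if (spi_min_period(starget) == 8)
		DV_SET(pcomp_en, 1);	/* period factor 8 means U640 */
} else {
	DV_SET(iu, 0);
}

if (spi_signalling(shost) == SPI_SIGNAL_SE ||
    spi_signalling(shost) == SPI_SIGNAL_HVD ||
    !scsi_device_dt(sdev))
	DV_SET(dt, 0);
else
	DV_SET(dt, 1);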
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 98bd3aab9739..638cff41d436 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1215,7 +1215,7 @@ repeat:
1215 /* Either no media are present but the drive didn't tell us, 1215 /* Either no media are present but the drive didn't tell us,
1216 or they are present but the read capacity command fails */ 1216 or they are present but the read capacity command fails */
1217 /* sdkp->media_present = 0; -- not always correct */ 1217 /* sdkp->media_present = 0; -- not always correct */
1218 sdkp->capacity = 0x200000; /* 1 GB - random */ 1218 sdkp->capacity = 0; /* unknown mapped to zero - as usual */
1219 1219
1220 return; 1220 return;
1221 } else if (the_result && longrc) { 1221 } else if (the_result && longrc) {
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 7cd366fcc571..4f1db6f2aae8 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -97,7 +97,7 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id, struct pt_regs *regs)
97} 97}
98 98
99static inline 99static inline
100void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp) 100void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
101{ 101{
102 unsigned long len = cmd->SCp.this_residual; 102 unsigned long len = cmd->SCp.this_residual;
103 void *addr = cmd->SCp.ptr; 103 void *addr = cmd->SCp.ptr;
@@ -129,7 +129,7 @@ void fill_hpc_entries(struct hpc_chunk *hcp, Scsi_Cmnd *cmd, int datainp)
129 hcp->desc.cntinfo = HPCDMA_EOX; 129 hcp->desc.cntinfo = HPCDMA_EOX;
130} 130}
131 131
132static int dma_setup(Scsi_Cmnd *cmd, int datainp) 132static int dma_setup(struct scsi_cmnd *cmd, int datainp)
133{ 133{
134 struct ip22_hostdata *hdata = HDATA(cmd->device->host); 134 struct ip22_hostdata *hdata = HDATA(cmd->device->host);
135 struct hpc3_scsiregs *hregs = 135 struct hpc3_scsiregs *hregs =
@@ -163,7 +163,7 @@ static int dma_setup(Scsi_Cmnd *cmd, int datainp)
163 return 0; 163 return 0;
164} 164}
165 165
166static void dma_stop(struct Scsi_Host *instance, Scsi_Cmnd *SCpnt, 166static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
167 int status) 167 int status)
168{ 168{
169 struct ip22_hostdata *hdata = HDATA(instance); 169 struct ip22_hostdata *hdata = HDATA(instance);
@@ -305,7 +305,7 @@ static int sgiwd93_release(struct Scsi_Host *instance)
305 return 1; 305 return 1;
306} 306}
307 307
308static int sgiwd93_bus_reset(Scsi_Cmnd *cmd) 308static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
309{ 309{
310 /* FIXME perform bus-specific reset */ 310 /* FIXME perform bus-specific reset */
311 311
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
new file mode 100644
index 000000000000..3cf3106a29b8
--- /dev/null
+++ b/drivers/scsi/stex.c
@@ -0,0 +1,1252 @@
1/*
2 * SuperTrak EX Series Storage Controller driver for Linux
3 *
4 * Copyright (C) 2005, 2006 Promise Technology Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Written By:
12 * Ed Lin <promise_linux@promise.com>
13 *
14 * Version: 2.9.0.13
15 *
16 */
17
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23#include <linux/time.h>
24#include <linux/pci.h>
25#include <linux/blkdev.h>
26#include <linux/interrupt.h>
27#include <linux/types.h>
28#include <linux/module.h>
29#include <linux/spinlock.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <asm/byteorder.h>
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_tcq.h>
38
39#define DRV_NAME "stex"
40#define ST_DRIVER_VERSION "2.9.0.13"
41#define ST_VER_MAJOR 2
42#define ST_VER_MINOR 9
43#define ST_OEM 0
44#define ST_BUILD_VER 13
45
46enum {
47 /* MU register offset */
48 IMR0 = 0x10, /* MU_INBOUND_MESSAGE_REG0 */
49 IMR1 = 0x14, /* MU_INBOUND_MESSAGE_REG1 */
50 OMR0 = 0x18, /* MU_OUTBOUND_MESSAGE_REG0 */
51 OMR1 = 0x1c, /* MU_OUTBOUND_MESSAGE_REG1 */
52 IDBL = 0x20, /* MU_INBOUND_DOORBELL */
53 IIS = 0x24, /* MU_INBOUND_INTERRUPT_STATUS */
54 IIM = 0x28, /* MU_INBOUND_INTERRUPT_MASK */
55 ODBL = 0x2c, /* MU_OUTBOUND_DOORBELL */
56 OIS = 0x30, /* MU_OUTBOUND_INTERRUPT_STATUS */
57 OIM = 0x3c, /* MU_OUTBOUND_INTERRUPT_MASK */
58
59 /* MU register value */
60 MU_INBOUND_DOORBELL_HANDSHAKE = 1,
61 MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
62 MU_INBOUND_DOORBELL_STATUSTAILCHANGED = 4,
63 MU_INBOUND_DOORBELL_HMUSTOPPED = 8,
64 MU_INBOUND_DOORBELL_RESET = 16,
65
66 MU_OUTBOUND_DOORBELL_HANDSHAKE = 1,
67 MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED = 2,
68 MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED = 4,
69 MU_OUTBOUND_DOORBELL_BUSCHANGE = 8,
70 MU_OUTBOUND_DOORBELL_HASEVENT = 16,
71
72 /* MU status code */
73 MU_STATE_STARTING = 1,
74 MU_STATE_FMU_READY_FOR_HANDSHAKE = 2,
75 MU_STATE_SEND_HANDSHAKE_FRAME = 3,
76 MU_STATE_STARTED = 4,
77 MU_STATE_RESETTING = 5,
78
79 MU_MAX_DELAY_TIME = 240000,
80 MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
81 HMU_PARTNER_TYPE = 2,
82
83 /* firmware returned values */
84 SRB_STATUS_SUCCESS = 0x01,
85 SRB_STATUS_ERROR = 0x04,
86 SRB_STATUS_BUSY = 0x05,
87 SRB_STATUS_INVALID_REQUEST = 0x06,
88 SRB_STATUS_SELECTION_TIMEOUT = 0x0A,
89 SRB_SEE_SENSE = 0x80,
90
91 /* task attribute */
92 TASK_ATTRIBUTE_SIMPLE = 0x0,
93 TASK_ATTRIBUTE_HEADOFQUEUE = 0x1,
94 TASK_ATTRIBUTE_ORDERED = 0x2,
95 TASK_ATTRIBUTE_ACA = 0x4,
96
97 /* request count, etc. */
98 MU_MAX_REQUEST = 32,
99
100 /* one message wasted, use MU_MAX_REQUEST+1
101 to handle MU_MAX_REQUEST messages */
102 MU_REQ_COUNT = (MU_MAX_REQUEST + 1),
103 MU_STATUS_COUNT = (MU_MAX_REQUEST + 1),
104
105 STEX_CDB_LENGTH = MAX_COMMAND_SIZE,
106 REQ_VARIABLE_LEN = 1024,
107 STATUS_VAR_LEN = 128,
108 ST_CAN_QUEUE = MU_MAX_REQUEST,
109 ST_CMD_PER_LUN = MU_MAX_REQUEST,
110 ST_MAX_SG = 32,
111
112 /* sg flags */
113 SG_CF_EOT = 0x80, /* end of table */
114 SG_CF_64B = 0x40, /* 64 bit item */
115 SG_CF_HOST = 0x20, /* sg in host memory */
116
117 ST_MAX_ARRAY_SUPPORTED = 16,
118 ST_MAX_TARGET_NUM = (ST_MAX_ARRAY_SUPPORTED+1),
119 ST_MAX_LUN_PER_TARGET = 16,
120
121 st_shasta = 0,
122 st_vsc = 1,
123
124 PASSTHRU_REQ_TYPE = 0x00000001,
125 PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
126 ST_INTERNAL_TIMEOUT = 30,
127
128 /* vendor specific commands of Promise */
129 ARRAY_CMD = 0xe0,
130 CONTROLLER_CMD = 0xe1,
131 DEBUGGING_CMD = 0xe2,
132 PASSTHRU_CMD = 0xe3,
133
134 PASSTHRU_GET_ADAPTER = 0x05,
135 PASSTHRU_GET_DRVVER = 0x10,
136 CTLR_POWER_STATE_CHANGE = 0x0e,
137 CTLR_POWER_SAVING = 0x01,
138
139 PASSTHRU_SIGNATURE = 0x4e415041,
140
141 INQUIRY_EVPD = 0x01,
142};
143
144struct st_sgitem {
145 u8 ctrl; /* SG_CF_xxx */
146 u8 reserved[3];
147 __le32 count;
148 __le32 addr;
149 __le32 addr_hi;
150};
151
152struct st_sgtable {
153 __le16 sg_count;
154 __le16 max_sg_count;
155 __le32 sz_in_byte;
156 struct st_sgitem table[ST_MAX_SG];
157};
158
159struct handshake_frame {
160 __le32 rb_phy; /* request payload queue physical address */
161 __le32 rb_phy_hi;
162 __le16 req_sz; /* size of each request payload */
163 __le16 req_cnt; /* count of reqs the buffer can hold */
164 __le16 status_sz; /* size of each status payload */
165 __le16 status_cnt; /* count of status the buffer can hold */
166 __le32 hosttime; /* seconds from Jan 1, 1970 (GMT) */
167 __le32 hosttime_hi;
168 u8 partner_type; /* who sends this frame */
169 u8 reserved0[7];
170 __le32 partner_ver_major;
171 __le32 partner_ver_minor;
172 __le32 partner_ver_oem;
173 __le32 partner_ver_build;
174 u32 reserved1[4];
175};
176
177struct req_msg {
178 __le16 tag;
179 u8 lun;
180 u8 target;
181 u8 task_attr;
182 u8 task_manage;
183 u8 prd_entry;
184 u8 payload_sz; /* payload size in 4-byte units */
185 u8 cdb[STEX_CDB_LENGTH];
186 u8 variable[REQ_VARIABLE_LEN];
187};
188
189struct status_msg {
190 __le16 tag;
191 u8 lun;
192 u8 target;
193 u8 srb_status;
194 u8 scsi_status;
195 u8 reserved;
196 u8 payload_sz; /* payload size in 4-byte units */
197 u8 variable[STATUS_VAR_LEN];
198};
199
200struct ver_info {
201 u32 major;
202 u32 minor;
203 u32 oem;
204 u32 build;
205 u32 reserved[2];
206};
207
208struct st_frame {
209 u32 base[6];
210 u32 rom_addr;
211
212 struct ver_info drv_ver;
213 struct ver_info bios_ver;
214
215 u32 bus;
216 u32 slot;
217 u32 irq_level;
218 u32 irq_vec;
219 u32 id;
220 u32 subid;
221
222 u32 dimm_size;
223 u8 dimm_type;
224 u8 reserved[3];
225
226 u32 channel;
227 u32 reserved1;
228};
229
230struct st_drvver {
231 u32 major;
232 u32 minor;
233 u32 oem;
234 u32 build;
235 u32 signature[2];
236 u8 console_id;
237 u8 host_no;
238 u8 reserved0[2];
239 u32 reserved[3];
240};
241
242#define MU_REQ_BUFFER_SIZE (MU_REQ_COUNT * sizeof(struct req_msg))
243#define MU_STATUS_BUFFER_SIZE (MU_STATUS_COUNT * sizeof(struct status_msg))
244#define MU_BUFFER_SIZE (MU_REQ_BUFFER_SIZE + MU_STATUS_BUFFER_SIZE)
245#define STEX_BUFFER_SIZE (MU_BUFFER_SIZE + sizeof(struct st_frame))
246
247struct st_ccb {
248 struct req_msg *req;
249 struct scsi_cmnd *cmd;
250
251 void *sense_buffer;
252 unsigned int sense_bufflen;
253 int sg_count;
254
255 u32 req_type;
256 u8 srb_status;
257 u8 scsi_status;
258};
259
260struct st_hba {
261 void __iomem *mmio_base; /* iomapped PCI memory space */
262 void *dma_mem;
263 dma_addr_t dma_handle;
264
265 struct Scsi_Host *host;
266 struct pci_dev *pdev;
267
268 u32 req_head;
269 u32 req_tail;
270 u32 status_head;
271 u32 status_tail;
272
273 struct status_msg *status_buffer;
274 void *copy_buffer; /* temp buffer for driver-handled commands */
275 struct st_ccb ccb[MU_MAX_REQUEST];
276 struct st_ccb *wait_ccb;
277 wait_queue_head_t waitq;
278
279 unsigned int mu_status;
280 int out_req_cnt;
281
282 unsigned int cardtype;
283};
284
285static const char console_inq_page[] =
286{
287 0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
288 0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20, /* "Promise " */
289 0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E, /* "RAID Con" */
290 0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20, /* "sole " */
291 0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20, /* "1.00 " */
292 0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D, /* "SX/RSAF-" */
293 0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20, /* "TE1.00 " */
294 0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
295};
296
297MODULE_AUTHOR("Ed Lin");
298MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
299MODULE_LICENSE("GPL");
300MODULE_VERSION(ST_DRIVER_VERSION);
301
302static void stex_gettime(__le32 *time)
303{
304 struct timeval tv;
305 do_gettimeofday(&tv);
306
307 *time = cpu_to_le32(tv.tv_sec & 0xffffffff);
308 *(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
309}
310
311static struct status_msg *stex_get_status(struct st_hba *hba)
312{
313 struct status_msg *status =
314 hba->status_buffer + hba->status_tail;
315
316 ++hba->status_tail;
317 hba->status_tail %= MU_STATUS_COUNT;
318
319 return status;
320}
321
322static void stex_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
323{
324 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
325
326 cmd->sense_buffer[0] = 0x70; /* fixed format, current */
327 cmd->sense_buffer[2] = sk;
328 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */
329 cmd->sense_buffer[12] = asc;
330 cmd->sense_buffer[13] = ascq;
331}
332
333static void stex_invalid_field(struct scsi_cmnd *cmd,
334 void (*done)(struct scsi_cmnd *))
335{
336 /* "Invalid field in cbd" */
337 stex_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
338 done(cmd);
339}
340
341static struct req_msg *stex_alloc_req(struct st_hba *hba)
342{
343 struct req_msg *req = ((struct req_msg *)hba->dma_mem) +
344 hba->req_head;
345
346 ++hba->req_head;
347 hba->req_head %= MU_REQ_COUNT;
348
349 return req;
350}
351
352static int stex_map_sg(struct st_hba *hba,
353 struct req_msg *req, struct st_ccb *ccb)
354{
355 struct pci_dev *pdev = hba->pdev;
356 struct scsi_cmnd *cmd;
357 dma_addr_t dma_handle;
358 struct scatterlist *src;
359 struct st_sgtable *dst;
360 int i;
361
362 cmd = ccb->cmd;
363 dst = (struct st_sgtable *)req->variable;
364 dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
365 dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen);
366
367 if (cmd->use_sg) {
368 int n_elem;
369
370 src = (struct scatterlist *) cmd->request_buffer;
371 n_elem = pci_map_sg(pdev, src,
372 cmd->use_sg, cmd->sc_data_direction);
373 if (n_elem <= 0)
374 return -EIO;
375
376 ccb->sg_count = n_elem;
377 dst->sg_count = cpu_to_le16((u16)n_elem);
378
379 for (i = 0; i < n_elem; i++, src++) {
380 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
381 dst->table[i].addr =
382 cpu_to_le32(sg_dma_address(src) & 0xffffffff);
383 dst->table[i].addr_hi =
384 cpu_to_le32((sg_dma_address(src) >> 16) >> 16);
385 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
386 }
387 dst->table[--i].ctrl |= SG_CF_EOT;
388 return 0;
389 }
390
391 dma_handle = pci_map_single(pdev, cmd->request_buffer,
392 cmd->request_bufflen, cmd->sc_data_direction);
393 cmd->SCp.dma_handle = dma_handle;
394
395 ccb->sg_count = 1;
396 dst->sg_count = cpu_to_le16(1);
397 dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
398 dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
399 dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
400 dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
401
402 return 0;
403}
404
405static void stex_internal_copy(struct scsi_cmnd *cmd,
406 const void *src, size_t *count, int sg_count)
407{
408 size_t lcount;
409 size_t len;
410 void *s, *d, *base = NULL;
411 if (*count > cmd->request_bufflen)
412 *count = cmd->request_bufflen;
413 lcount = *count;
414 while (lcount) {
415 len = lcount;
416 s = (void *)src;
417 if (cmd->use_sg) {
418 size_t offset = *count - lcount;
419 s += offset;
420 base = scsi_kmap_atomic_sg(cmd->request_buffer,
421 sg_count, &offset, &len);
422 if (base == NULL) {
423 *count -= lcount;
424 return;
425 }
426 d = base + offset;
427 } else
428 d = cmd->request_buffer;
429
430 memcpy(d, s, len);
431
432 lcount -= len;
433 if (cmd->use_sg)
434 scsi_kunmap_atomic_sg(base);
435 }
436}
437
438static int stex_direct_copy(struct scsi_cmnd *cmd,
439 const void *src, size_t count)
440{
441 struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
442 size_t cp_len = count;
443 int n_elem = 0;
444
445 if (cmd->use_sg) {
446 n_elem = pci_map_sg(hba->pdev, cmd->request_buffer,
447 cmd->use_sg, cmd->sc_data_direction);
448 if (n_elem <= 0)
449 return 0;
450 }
451
452 stex_internal_copy(cmd, src, &cp_len, n_elem);
453
454 if (cmd->use_sg)
455 pci_unmap_sg(hba->pdev, cmd->request_buffer,
456 cmd->use_sg, cmd->sc_data_direction);
457 return cp_len == count;
458}
459
460static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
461{
462 struct st_frame *p;
463 size_t count = sizeof(struct st_frame);
464
465 p = hba->copy_buffer;
466 memset(p->base, 0, sizeof(u32)*6);
467 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
468 p->rom_addr = 0;
469
470 p->drv_ver.major = ST_VER_MAJOR;
471 p->drv_ver.minor = ST_VER_MINOR;
472 p->drv_ver.oem = ST_OEM;
473 p->drv_ver.build = ST_BUILD_VER;
474
475 p->bus = hba->pdev->bus->number;
476 p->slot = hba->pdev->devfn;
477 p->irq_level = 0;
478 p->irq_vec = hba->pdev->irq;
479 p->id = hba->pdev->vendor << 16 | hba->pdev->device;
480 p->subid =
481 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
482
483 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count);
484}
485
486static void
487stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
488{
489 req->tag = cpu_to_le16(tag);
490 req->task_attr = TASK_ATTRIBUTE_SIMPLE;
491 req->task_manage = 0; /* not supported yet */
492 req->payload_sz = (u8)(sizeof(struct req_msg)/sizeof(u32));
493
494 hba->ccb[tag].req = req;
495 hba->out_req_cnt++;
496
497 writel(hba->req_head, hba->mmio_base + IMR0);
498 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
499 readl(hba->mmio_base + IDBL); /* flush */
500}
501
502static int
503stex_slave_alloc(struct scsi_device *sdev)
504{
505 /* Cheat: usually extracted from Inquiry data */
506 sdev->tagged_supported = 1;
507
508 scsi_activate_tcq(sdev, sdev->host->can_queue);
509
510 return 0;
511}
512
513static int
514stex_slave_config(struct scsi_device *sdev)
515{
516 sdev->use_10_for_rw = 1;
517 sdev->use_10_for_ms = 1;
518 sdev->timeout = 60 * HZ;
519 sdev->tagged_supported = 1;
520
521 return 0;
522}
523
524static void
525stex_slave_destroy(struct scsi_device *sdev)
526{
527 scsi_deactivate_tcq(sdev, 1);
528}
529
530static int
531stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
532{
533 struct st_hba *hba;
534 struct Scsi_Host *host;
535 unsigned int id, lun;
536 struct req_msg *req;
537 u16 tag;
538 host = cmd->device->host;
539 id = cmd->device->id;
540 lun = cmd->device->channel; /* workaround for a firmware lun issue */
541 hba = (struct st_hba *) &host->hostdata[0];
542
543 switch (cmd->cmnd[0]) {
544 case MODE_SENSE_10:
545 {
546 static char ms10_caching_page[12] =
547 { 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
548 unsigned char page;
549 page = cmd->cmnd[2] & 0x3f;
550 if (page == 0x8 || page == 0x3f) {
551 stex_direct_copy(cmd, ms10_caching_page,
552 sizeof(ms10_caching_page));
553 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
554 done(cmd);
555 } else
556 stex_invalid_field(cmd, done);
557 return 0;
558 }
559 case INQUIRY:
560 if (id != ST_MAX_ARRAY_SUPPORTED)
561 break;
562 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
563 stex_direct_copy(cmd, console_inq_page,
564 sizeof(console_inq_page));
565 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
566 done(cmd);
567 } else
568 stex_invalid_field(cmd, done);
569 return 0;
570 case PASSTHRU_CMD:
571 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
572 struct st_drvver ver;
573 ver.major = ST_VER_MAJOR;
574 ver.minor = ST_VER_MINOR;
575 ver.oem = ST_OEM;
576 ver.build = ST_BUILD_VER;
577 ver.signature[0] = PASSTHRU_SIGNATURE;
578 ver.console_id = ST_MAX_ARRAY_SUPPORTED;
579 ver.host_no = hba->host->host_no;
580 cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ?
581 DID_OK << 16 | COMMAND_COMPLETE << 8 :
582 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
583 done(cmd);
584 return 0;
585 }
586 default:
587 break;
588 }
589
590 cmd->scsi_done = done;
591
592 tag = cmd->request->tag;
593
594 if (unlikely(tag >= host->can_queue))
595 return SCSI_MLQUEUE_HOST_BUSY;
596
597 req = stex_alloc_req(hba);
598 req->lun = lun;
599 req->target = id;
600
601 /* cdb */
602 memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
603
604 hba->ccb[tag].cmd = cmd;
605 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
606 hba->ccb[tag].sense_buffer = cmd->sense_buffer;
607 hba->ccb[tag].req_type = 0;
608
609 if (cmd->sc_data_direction != DMA_NONE)
610 stex_map_sg(hba, req, &hba->ccb[tag]);
611
612 stex_send_cmd(hba, req, tag);
613 return 0;
614}
615
616static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
617{
618 if (cmd->sc_data_direction != DMA_NONE) {
619 if (cmd->use_sg)
620 pci_unmap_sg(hba->pdev, cmd->request_buffer,
621 cmd->use_sg, cmd->sc_data_direction);
622 else
623 pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
624 cmd->request_bufflen, cmd->sc_data_direction);
625 }
626}
627
628static void stex_scsi_done(struct st_ccb *ccb)
629{
630 struct scsi_cmnd *cmd = ccb->cmd;
631 int result;
632
633 if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
634 result = ccb->scsi_status;
635 switch (ccb->scsi_status) {
636 case SAM_STAT_GOOD:
637 result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
638 break;
639 case SAM_STAT_CHECK_CONDITION:
640 result |= DRIVER_SENSE << 24;
641 break;
642 case SAM_STAT_BUSY:
643 result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
644 break;
645 default:
646 result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
647 break;
648 }
649 }
650 else if (ccb->srb_status & SRB_SEE_SENSE)
651 result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
652 else switch (ccb->srb_status) {
653 case SRB_STATUS_SELECTION_TIMEOUT:
654 result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
655 break;
656 case SRB_STATUS_BUSY:
657 result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
658 break;
659 case SRB_STATUS_INVALID_REQUEST:
660 case SRB_STATUS_ERROR:
661 default:
662 result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
663 break;
664 }
665
666 cmd->result = result;
667 cmd->scsi_done(cmd);
668}
669
670static void stex_copy_data(struct st_ccb *ccb,
671 struct status_msg *resp, unsigned int variable)
672{
673 size_t count = variable;
674 if (resp->scsi_status != SAM_STAT_GOOD) {
675 if (ccb->sense_buffer != NULL)
676 memcpy(ccb->sense_buffer, resp->variable,
677 min(variable, ccb->sense_bufflen));
678 return;
679 }
680
681 if (ccb->cmd == NULL)
682 return;
683 stex_internal_copy(ccb->cmd, resp->variable, &count, ccb->sg_count);
684}
685
686static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
687{
688 void __iomem *base = hba->mmio_base;
689 struct status_msg *resp;
690 struct st_ccb *ccb;
691 unsigned int size;
692 u16 tag;
693
694 if (!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED))
695 return;
696
697 /* status payloads */
698 hba->status_head = readl(base + OMR1);
699 if (unlikely(hba->status_head >= MU_STATUS_COUNT)) {
700 printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
701 pci_name(hba->pdev));
702 return;
703 }
704
705 if (unlikely(hba->mu_status != MU_STATE_STARTED ||
706 hba->out_req_cnt <= 0)) {
707 hba->status_tail = hba->status_head;
708 goto update_status;
709 }
710
711 while (hba->status_tail != hba->status_head) {
712 resp = stex_get_status(hba);
713 tag = le16_to_cpu(resp->tag);
714 if (unlikely(tag >= hba->host->can_queue)) {
715 printk(KERN_WARNING DRV_NAME
716 "(%s): invalid tag\n", pci_name(hba->pdev));
717 continue;
718 }
719
720 ccb = &hba->ccb[tag];
721 if (hba->wait_ccb == ccb)
722 hba->wait_ccb = NULL;
723 if (unlikely(ccb->req == NULL)) {
724 printk(KERN_WARNING DRV_NAME
725 "(%s): lagging req\n", pci_name(hba->pdev));
726 continue;
727 }
728
729 size = resp->payload_sz * sizeof(u32); /* payload size */
730 if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
731 size > sizeof(*resp))) {
732 printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
733 pci_name(hba->pdev));
734 } else {
735 size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
736 if (size)
737 stex_copy_data(ccb, resp, size);
738 }
739
740 ccb->srb_status = resp->srb_status;
741 ccb->scsi_status = resp->scsi_status;
742
743 if (likely(ccb->cmd != NULL)) {
744 if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
745 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
746 stex_controller_info(hba, ccb);
747 stex_unmap_sg(hba, ccb->cmd);
748 stex_scsi_done(ccb);
749 hba->out_req_cnt--;
750 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
751 hba->out_req_cnt--;
752 if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) {
753 ccb->req_type = 0;
754 continue;
755 }
756 ccb->req_type = 0;
757 if (waitqueue_active(&hba->waitq))
758 wake_up(&hba->waitq);
759 }
760 }
761
762update_status:
763 writel(hba->status_head, base + IMR1);
764 readl(base + IMR1); /* flush */
765}
766
767static irqreturn_t stex_intr(int irq, void *__hba, struct pt_regs *regs)
768{
769 struct st_hba *hba = __hba;
770 void __iomem *base = hba->mmio_base;
771 u32 data;
772 unsigned long flags;
773 int handled = 0;
774
775 spin_lock_irqsave(hba->host->host_lock, flags);
776
777 data = readl(base + ODBL);
778
779 if (data && data != 0xffffffff) {
780 /* clear the interrupt */
781 writel(data, base + ODBL);
782 readl(base + ODBL); /* flush */
783 stex_mu_intr(hba, data);
784 handled = 1;
785 }
786
787 spin_unlock_irqrestore(hba->host->host_lock, flags);
788
789 return IRQ_RETVAL(handled);
790}
791
792static int stex_handshake(struct st_hba *hba)
793{
794 void __iomem *base = hba->mmio_base;
795 struct handshake_frame *h;
796 dma_addr_t status_phys;
797 int i;
798
799 if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
800 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
801 readl(base + IDBL);
802 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
803 && i < MU_MAX_DELAY_TIME; i++) {
804 rmb();
805 msleep(1);
806 }
807
808 if (i == MU_MAX_DELAY_TIME) {
809 printk(KERN_ERR DRV_NAME
810 "(%s): no handshake signature\n",
811 pci_name(hba->pdev));
812 return -1;
813 }
814 }
815
816 udelay(10);
817
818 h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
819 h->rb_phy = cpu_to_le32(hba->dma_handle);
820 h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
821 h->req_sz = cpu_to_le16(sizeof(struct req_msg));
822 h->req_cnt = cpu_to_le16(MU_REQ_COUNT);
823 h->status_sz = cpu_to_le16(sizeof(struct status_msg));
824 h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
825 stex_gettime(&h->hosttime);
826 h->partner_type = HMU_PARTNER_TYPE;
827
828 status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
829 writel(status_phys, base + IMR0);
830 readl(base + IMR0);
831 writel((status_phys >> 16) >> 16, base + IMR1);
832 readl(base + IMR1);
833
834 writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
835 readl(base + OMR0);
836 writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
837 readl(base + IDBL); /* flush */
838
839 udelay(10);
840 for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
841 && i < MU_MAX_DELAY_TIME; i++) {
842 rmb();
843 msleep(1);
844 }
845
846 if (i == MU_MAX_DELAY_TIME) {
847 printk(KERN_ERR DRV_NAME
848 "(%s): no signature after handshake frame\n",
849 pci_name(hba->pdev));
850 return -1;
851 }
852
853 writel(0, base + IMR0);
854 readl(base + IMR0);
855 writel(0, base + OMR0);
856 readl(base + OMR0);
857 writel(0, base + IMR1);
858 readl(base + IMR1);
859 writel(0, base + OMR1);
860 readl(base + OMR1); /* flush */
861 hba->mu_status = MU_STATE_STARTED;
862 return 0;
863}
864
865static int stex_abort(struct scsi_cmnd *cmd)
866{
867 struct Scsi_Host *host = cmd->device->host;
868 struct st_hba *hba = (struct st_hba *)host->hostdata;
869 u16 tag = cmd->request->tag;
870 void __iomem *base;
871 u32 data;
872 int result = SUCCESS;
873 unsigned long flags;
874 base = hba->mmio_base;
875 spin_lock_irqsave(host->host_lock, flags);
876 if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
877 hba->wait_ccb = &hba->ccb[tag];
878 else {
879 for (tag = 0; tag < host->can_queue; tag++)
880 if (hba->ccb[tag].cmd == cmd) {
881 hba->wait_ccb = &hba->ccb[tag];
882 break;
883 }
884 if (tag >= host->can_queue)
885 goto out;
886 }
887
888 data = readl(base + ODBL);
889 if (data == 0 || data == 0xffffffff)
890 goto fail_out;
891
892 writel(data, base + ODBL);
893 readl(base + ODBL); /* flush */
894
895 stex_mu_intr(hba, data);
896
897 if (hba->wait_ccb == NULL) {
898 printk(KERN_WARNING DRV_NAME
899 "(%s): lost interrupt\n", pci_name(hba->pdev));
900 goto out;
901 }
902
903fail_out:
904 stex_unmap_sg(hba, cmd);
905 hba->wait_ccb->req = NULL; /* nullify the req's future return */
906 hba->wait_ccb = NULL;
907 result = FAILED;
908out:
909 spin_unlock_irqrestore(host->host_lock, flags);
910 return result;
911}
912
913static void stex_hard_reset(struct st_hba *hba)
914{
915 struct pci_bus *bus;
916 int i;
917 u16 pci_cmd;
918 u8 pci_bctl;
919
920 for (i = 0; i < 16; i++)
921 pci_read_config_dword(hba->pdev, i * 4,
922 &hba->pdev->saved_config_space[i]);
923
924 /* Reset the secondary bus. Our controller (MU/ATU) is the only device on
925 the secondary bus. Consult the Intel 80331/3 developer's manual for details */
926 bus = hba->pdev->bus;
927 pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
928 pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
929 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
930 msleep(1);
931 pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
932 pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
933
934 for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
935 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
936 if (pci_cmd & PCI_COMMAND_MASTER)
937 break;
938 msleep(1);
939 }
940
941 ssleep(5);
942 for (i = 0; i < 16; i++)
943 pci_write_config_dword(hba->pdev, i * 4,
944 hba->pdev->saved_config_space[i]);
945}
946
947static int stex_reset(struct scsi_cmnd *cmd)
948{
949 struct st_hba *hba;
950 unsigned long flags;
951 hba = (struct st_hba *) &cmd->device->host->hostdata[0];
952
953 hba->mu_status = MU_STATE_RESETTING;
954
955 if (hba->cardtype == st_shasta)
956 stex_hard_reset(hba);
957
958 if (stex_handshake(hba)) {
959 printk(KERN_WARNING DRV_NAME
960 "(%s): resetting: handshake failed\n",
961 pci_name(hba->pdev));
962 return FAILED;
963 }
964 spin_lock_irqsave(hba->host->host_lock, flags);
965 hba->req_head = 0;
966 hba->req_tail = 0;
967 hba->status_head = 0;
968 hba->status_tail = 0;
969 hba->out_req_cnt = 0;
970 spin_unlock_irqrestore(hba->host->host_lock, flags);
971
972 return SUCCESS;
973}
974
975static int stex_biosparam(struct scsi_device *sdev,
976 struct block_device *bdev, sector_t capacity, int geom[])
977{
978 int heads = 255, sectors = 63, cylinders;
979
980 if (capacity < 0x200000) {
981 heads = 64;
982 sectors = 32;
983 }
984
985 cylinders = sector_div(capacity, heads * sectors);
986
987 geom[0] = heads;
988 geom[1] = sectors;
989 geom[2] = cylinders;
990
991 return 0;
992}
993
994static struct scsi_host_template driver_template = {
995 .module = THIS_MODULE,
996 .name = DRV_NAME,
997 .proc_name = DRV_NAME,
998 .bios_param = stex_biosparam,
999 .queuecommand = stex_queuecommand,
1000 .slave_alloc = stex_slave_alloc,
1001 .slave_configure = stex_slave_config,
1002 .slave_destroy = stex_slave_destroy,
1003 .eh_abort_handler = stex_abort,
1004 .eh_host_reset_handler = stex_reset,
1005 .can_queue = ST_CAN_QUEUE,
1006 .this_id = -1,
1007 .sg_tablesize = ST_MAX_SG,
1008 .cmd_per_lun = ST_CMD_PER_LUN,
1009};
1010
1011static int stex_set_dma_mask(struct pci_dev * pdev)
1012{
1013 int ret;
1014 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
1015 && !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1016 return 0;
1017 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1018 if (!ret)
1019 ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1020 return ret;
1021}
1022
1023static int __devinit
1024stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1025{
1026 struct st_hba *hba;
1027 struct Scsi_Host *host;
1028 int err;
1029
1030 err = pci_enable_device(pdev);
1031 if (err)
1032 return err;
1033
1034 pci_set_master(pdev);
1035
1036 host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
1037
1038 if (!host) {
1039 printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
1040 pci_name(pdev));
1041 err = -ENOMEM;
1042 goto out_disable;
1043 }
1044
1045 hba = (struct st_hba *)host->hostdata;
1046 memset(hba, 0, sizeof(struct st_hba));
1047
1048 err = pci_request_regions(pdev, DRV_NAME);
1049 if (err < 0) {
1050 printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
1051 pci_name(pdev));
1052 goto out_scsi_host_put;
1053 }
1054
1055 hba->mmio_base = ioremap(pci_resource_start(pdev, 0),
1056 pci_resource_len(pdev, 0));
1057 if (!hba->mmio_base) {
1058 printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
1059 pci_name(pdev));
1060 err = -ENOMEM;
1061 goto out_release_regions;
1062 }
1063
1064 err = stex_set_dma_mask(pdev);
1065 if (err) {
1066 printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
1067 pci_name(pdev));
1068 goto out_iounmap;
1069 }
1070
1071 hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1072 STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
1073 if (!hba->dma_mem) {
1074 err = -ENOMEM;
1075 printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
1076 pci_name(pdev));
1077 goto out_iounmap;
1078 }
1079
1080 hba->status_buffer =
1081 (struct status_msg *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
1082 hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
1083 hba->mu_status = MU_STATE_STARTING;
1084
1085 hba->cardtype = (unsigned int) id->driver_data;
1086
1087 /* firmware uses an id/lun pair for a logical drive, but lun would
1088 always be 0 if CONFIG_SCSI_MULTI_LUN is not configured, so we use
1089 the channel to map the lun here */
1090 host->max_channel = ST_MAX_LUN_PER_TARGET - 1;
1091 host->max_id = ST_MAX_TARGET_NUM;
1092 host->max_lun = 1;
1093 host->unique_id = host->host_no;
1094 host->max_cmd_len = STEX_CDB_LENGTH;
1095
1096 hba->host = host;
1097 hba->pdev = pdev;
1098 init_waitqueue_head(&hba->waitq);
1099
1100 err = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba);
1101 if (err) {
1102 printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
1103 pci_name(pdev));
1104 goto out_pci_free;
1105 }
1106
1107 err = stex_handshake(hba);
1108 if (err)
1109 goto out_free_irq;
1110
1111 err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
1112 if (err) {
1113 printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
1114 pci_name(pdev));
1115 goto out_free_irq;
1116 }
1117
1118 pci_set_drvdata(pdev, hba);
1119
1120 err = scsi_add_host(host, &pdev->dev);
1121 if (err) {
1122 printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
1123 pci_name(pdev));
1124 goto out_free_irq;
1125 }
1126
1127 scsi_scan_host(host);
1128
1129 return 0;
1130
1131out_free_irq:
1132 free_irq(pdev->irq, hba);
1133out_pci_free:
1134 dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
1135 hba->dma_mem, hba->dma_handle);
1136out_iounmap:
1137 iounmap(hba->mmio_base);
1138out_release_regions:
1139 pci_release_regions(pdev);
1140out_scsi_host_put:
1141 scsi_host_put(host);
1142out_disable:
1143 pci_disable_device(pdev);
1144
1145 return err;
1146}
1147
1148static void stex_hba_stop(struct st_hba *hba)
1149{
1150 struct req_msg *req;
1151 unsigned long flags;
1152 unsigned long before;
1153 u16 tag = 0;
1154
1155 spin_lock_irqsave(hba->host->host_lock, flags);
1156 req = stex_alloc_req(hba);
1157 memset(req->cdb, 0, STEX_CDB_LENGTH);
1158
1159 req->cdb[0] = CONTROLLER_CMD;
1160 req->cdb[1] = CTLR_POWER_STATE_CHANGE;
1161 req->cdb[2] = CTLR_POWER_SAVING;
1162
1163 hba->ccb[tag].cmd = NULL;
1164 hba->ccb[tag].sg_count = 0;
1165 hba->ccb[tag].sense_bufflen = 0;
1166 hba->ccb[tag].sense_buffer = NULL;
1167 hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE;
1168
1169 stex_send_cmd(hba, req, tag);
1170 spin_unlock_irqrestore(hba->host->host_lock, flags);
1171
1172 before = jiffies;
1173 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1174 if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
1175 return;
1176 msleep(10);
1177 }
1178}
1179
1180static void stex_hba_free(struct st_hba *hba)
1181{
1182 free_irq(hba->pdev->irq, hba);
1183
1184 iounmap(hba->mmio_base);
1185
1186 pci_release_regions(hba->pdev);
1187
1188 dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
1189 hba->dma_mem, hba->dma_handle);
1190}
1191
1192static void stex_remove(struct pci_dev *pdev)
1193{
1194 struct st_hba *hba = pci_get_drvdata(pdev);
1195
1196 scsi_remove_host(hba->host);
1197
1198 pci_set_drvdata(pdev, NULL);
1199
1200 stex_hba_stop(hba);
1201
1202 stex_hba_free(hba);
1203
1204 scsi_host_put(hba->host);
1205
1206 pci_disable_device(pdev);
1207}
1208
1209static void stex_shutdown(struct pci_dev *pdev)
1210{
1211 struct st_hba *hba = pci_get_drvdata(pdev);
1212
1213 stex_hba_stop(hba);
1214}
1215
1216static struct pci_device_id stex_pci_tbl[] = {
1217 { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1218 { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1219 { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1220 { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1221 { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1222 { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1223 { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
1224 { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1225 { } /* terminate list */
1226};
1227MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
1228
1229static struct pci_driver stex_pci_driver = {
1230 .name = DRV_NAME,
1231 .id_table = stex_pci_tbl,
1232 .probe = stex_probe,
1233 .remove = __devexit_p(stex_remove),
1234 .shutdown = stex_shutdown,
1235};
1236
1237static int __init stex_init(void)
1238{
1239 printk(KERN_INFO DRV_NAME
1240 ": Promise SuperTrak EX Driver version: %s\n",
1241 ST_DRIVER_VERSION);
1242
1243 return pci_register_driver(&stex_pci_driver);
1244}
1245
1246static void __exit stex_exit(void)
1247{
1248 pci_unregister_driver(&stex_pci_driver);
1249}
1250
1251module_init(stex_init);
1252module_exit(stex_exit);
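One idiom recurs throughout the new stex driver: 64-bit DMA addresses and the host time are split into two 32-bit little-endian words with (x >> 16) >> 16 rather than x >> 32 (see stex_gettime(), stex_map_sg() and stex_handshake()). The double shift yields the high word when the value really is 64 bits wide, and simply 0 when dma_addr_t or time_t is only 32 bits on the build, without the shift-by-width warning a plain >> 32 would trigger. As an illustration only, with a hypothetical helper name:

/* Illustration, not part of the driver: split_addr() is hypothetical. */
static void split_addr(dma_addr_t addr, __le32 *lo, __le32 *hi)
{
	*lo = cpu_to_le32(addr & 0xffffffff);
	*hi = cpu_to_le32((addr >> 16) >> 16);	/* high word, or 0 on 32-bit dma_addr_t */
}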
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index e681681ab7a2..0372aa9fa190 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -196,8 +196,8 @@ struct mscp {
196 u32 sense_data PACKED; 196 u32 sense_data PACKED;
197 /* The following fields are for software only. They are included in 197 /* The following fields are for software only. They are included in
198 the MSCP structure because they are associated with SCSI requests. */ 198 the MSCP structure because they are associated with SCSI requests. */
199 void (*done)(Scsi_Cmnd *); 199 void (*done) (struct scsi_cmnd *);
200 Scsi_Cmnd *SCint; 200 struct scsi_cmnd *SCint;
201 ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */ 201 ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
202}; 202};
203 203
@@ -289,7 +289,7 @@ static const unsigned short ultrastor_ports_14f[] = {
289 289
290static void ultrastor_interrupt(int, void *, struct pt_regs *); 290static void ultrastor_interrupt(int, void *, struct pt_regs *);
291static irqreturn_t do_ultrastor_interrupt(int, void *, struct pt_regs *); 291static irqreturn_t do_ultrastor_interrupt(int, void *, struct pt_regs *);
292static inline void build_sg_list(struct mscp *, Scsi_Cmnd *SCpnt); 292static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt);
293 293
294 294
295/* Always called with host lock held */ 295/* Always called with host lock held */
@@ -673,7 +673,7 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
673 return buf; 673 return buf;
674} 674}
675 675
676static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt) 676static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
677{ 677{
678 struct scatterlist *sl; 678 struct scatterlist *sl;
679 long transfer_length = 0; 679 long transfer_length = 0;
@@ -694,7 +694,8 @@ static inline void build_sg_list(struct mscp *mscp, Scsi_Cmnd *SCpnt)
694 mscp->transfer_data_length = transfer_length; 694 mscp->transfer_data_length = transfer_length;
695} 695}
696 696
697static int ultrastor_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) 697static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
698 void (*done) (struct scsi_cmnd *))
698{ 699{
699 struct mscp *my_mscp; 700 struct mscp *my_mscp;
700#if ULTRASTOR_MAX_CMDS > 1 701#if ULTRASTOR_MAX_CMDS > 1
@@ -833,7 +834,7 @@ retry:
833 834
834 */ 835 */
835 836
836static int ultrastor_abort(Scsi_Cmnd *SCpnt) 837static int ultrastor_abort(struct scsi_cmnd *SCpnt)
837{ 838{
838#if ULTRASTOR_DEBUG & UD_ABORT 839#if ULTRASTOR_DEBUG & UD_ABORT
839 char out[108]; 840 char out[108];
@@ -843,7 +844,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt)
 	unsigned int mscp_index;
 	unsigned char old_aborted;
 	unsigned long flags;
-	void (*done)(Scsi_Cmnd *);
+	void (*done)(struct scsi_cmnd *);
 	struct Scsi_Host *host = SCpnt->device->host;
 
 	if(config.slot)
@@ -960,7 +961,7 @@ static int ultrastor_abort(Scsi_Cmnd *SCpnt)
 	return SUCCESS;
 }
 
-static int ultrastor_host_reset(Scsi_Cmnd * SCpnt)
+static int ultrastor_host_reset(struct scsi_cmnd * SCpnt)
 {
 	unsigned long flags;
 	int i;
@@ -1045,8 +1046,8 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	unsigned int mscp_index;
 #endif
 	struct mscp *mscp;
-	void (*done)(Scsi_Cmnd *);
-	Scsi_Cmnd *SCtmp;
+	void (*done) (struct scsi_cmnd *);
+	struct scsi_cmnd *SCtmp;
 
 #if ULTRASTOR_MAX_CMDS == 1
 	mscp = &config.mscp[0];
@@ -1079,7 +1080,7 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		return;
 	}
 	if (icm_status == 3) {
-		void (*done)(Scsi_Cmnd *) = mscp->done;
+		void (*done)(struct scsi_cmnd *) = mscp->done;
 		if (done) {
 			mscp->done = NULL;
 			mscp->SCint->result = DID_ABORT << 16;
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
index da759a11deff..a692905f95f7 100644
--- a/drivers/scsi/ultrastor.h
+++ b/drivers/scsi/ultrastor.h
@@ -14,11 +14,13 @@
 #define _ULTRASTOR_H
 
 static int ultrastor_detect(struct scsi_host_template *);
-static const char *ultrastor_info(struct Scsi_Host * shpnt);
-static int ultrastor_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-static int ultrastor_abort(Scsi_Cmnd *);
-static int ultrastor_host_reset(Scsi_Cmnd *);
-static int ultrastor_biosparam(struct scsi_device *, struct block_device *, sector_t, int *);
+static const char *ultrastor_info(struct Scsi_Host *shpnt);
+static int ultrastor_queuecommand(struct scsi_cmnd *,
+				  void (*done)(struct scsi_cmnd *));
+static int ultrastor_abort(struct scsi_cmnd *);
+static int ultrastor_host_reset(struct scsi_cmnd *);
+static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
+			       sector_t, int *);
 
 
 #define ULTRASTOR_14F_MAX_SG 16
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index bfd2a22759eb..a3b99caf80e6 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -1400,8 +1400,8 @@ static struct uart_ops pmz_pops = {
 static int __init pmz_init_port(struct uart_pmac_port *uap)
 {
 	struct device_node *np = uap->node;
-	char *conn;
-	struct slot_names_prop {
+	const char *conn;
+	const struct slot_names_prop {
 		int count;
 		char name[1];
 	} *slots;
@@ -1458,7 +1458,7 @@ no_dma:
 	uap->flags |= PMACZILOG_FLAG_IS_IRDA;
 	uap->port_type = PMAC_SCC_ASYNC;
 	/* 1999 Powerbook G3 has slot-names property instead */
-	slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
+	slots = get_property(np, "slot-names", &len);
 	if (slots && slots->count > 0) {
 		if (strcmp(slots->name, "IrDA") == 0)
 			uap->flags |= PMACZILOG_FLAG_IS_IRDA;
@@ -1470,7 +1470,8 @@ no_dma:
 	if (ZS_IS_INTMODEM(uap)) {
 		struct device_node* i2c_modem = find_devices("i2c-modem");
 		if (i2c_modem) {
-			char* mid = get_property(i2c_modem, "modem-id", NULL);
+			const char* mid =
+				get_property(i2c_modem, "modem-id", NULL);
 			if (mid) switch(*mid) {
 			case 0x04 :
 			case 0x05 :
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index a2c56b2de589..3305fb6079eb 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -1818,7 +1818,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 	int n, len, insize = 0;
 
 	/* Ignore all Wacom devices */
-	if (dev->descriptor.idVendor == USB_VENDOR_ID_WACOM)
+	if (le16_to_cpu(dev->descriptor.idVendor) == USB_VENDOR_ID_WACOM)
 		return NULL;
 
 	for (n = 0; hid_blacklist[n].idVendor; n++)
diff --git a/drivers/video/S3triofb.c b/drivers/video/S3triofb.c
index afd146f5f683..397005eb392d 100644
--- a/drivers/video/S3triofb.c
+++ b/drivers/video/S3triofb.c
@@ -349,30 +349,30 @@ static void __init s3triofb_of_init(struct device_node *dp)
     s3trio_name[sizeof(s3trio_name)-1] = '\0';
     strcpy(fb_fix.id, s3trio_name);
 
-    if((pp = (int *)get_property(dp, "vendor-id", &len)) != NULL
+    if((pp = get_property(dp, "vendor-id", &len)) != NULL
 	&& *pp!=PCI_VENDOR_ID_S3) {
 	printk("%s: can't find S3 Trio board\n", dp->full_name);
 	return;
     }
 
-    if((pp = (int *)get_property(dp, "device-id", &len)) != NULL
+    if((pp = get_property(dp, "device-id", &len)) != NULL
 	&& *pp!=PCI_DEVICE_ID_S3_TRIO) {
 	printk("%s: can't find S3 Trio board\n", dp->full_name);
 	return;
     }
 
-    if ((pp = (int *)get_property(dp, "depth", &len)) != NULL
+    if ((pp = get_property(dp, "depth", &len)) != NULL
 	&& len == sizeof(int) && *pp != 8) {
 	printk("%s: can't use depth = %d\n", dp->full_name, *pp);
 	return;
     }
-    if ((pp = (int *)get_property(dp, "width", &len)) != NULL
+    if ((pp = get_property(dp, "width", &len)) != NULL
 	&& len == sizeof(int))
 	fb_var.xres = fb_var.xres_virtual = *pp;
-    if ((pp = (int *)get_property(dp, "height", &len)) != NULL
+    if ((pp = get_property(dp, "height", &len)) != NULL
 	&& len == sizeof(int))
 	fb_var.yres = fb_var.yres_virtual = *pp;
-    if ((pp = (int *)get_property(dp, "linebytes", &len)) != NULL
+    if ((pp = get_property(dp, "linebytes", &len)) != NULL
 	&& len == sizeof(int))
 	fb_fix.line_length = *pp;
     else
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 8e3400d5dd21..0ed577e7cc21 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -413,11 +413,11 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo)
 static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
 {
 	struct device_node *dp = rinfo->of_node;
-	u32 *val;
+	const u32 *val;
 
 	if (dp == NULL)
 		return -ENODEV;
-	val = (u32 *) get_property(dp, "ATY,RefCLK", NULL);
+	val = get_property(dp, "ATY,RefCLK", NULL);
 	if (!val || !*val) {
 		printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
 		return -EINVAL;
@@ -425,11 +425,11 @@ static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo)
 
 	rinfo->pll.ref_clk = (*val) / 10;
 
-	val = (u32 *) get_property(dp, "ATY,SCLK", NULL);
+	val = get_property(dp, "ATY,SCLK", NULL);
 	if (val && *val)
 		rinfo->pll.sclk = (*val) / 10;
 
-	val = (u32 *) get_property(dp, "ATY,MCLK", NULL);
+	val = get_property(dp, "ATY,MCLK", NULL);
 	if (val && *val)
 		rinfo->pll.mclk = (*val) / 10;
 
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index 98c05bc0de44..ea531a6f45d1 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -64,13 +64,13 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
 {
 	static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID",
 				     "EDID1", "EDID2", NULL };
-	u8 *pedid = NULL;
-	u8 *pmt = NULL;
+	const u8 *pedid = NULL;
+	const u8 *pmt = NULL;
 	u8 *tmp;
 	int i, mt = MT_NONE;
 
 	RTRACE("analyzing OF properties...\n");
-	pmt = (u8 *)get_property(dp, "display-type", NULL);
+	pmt = get_property(dp, "display-type", NULL);
 	if (!pmt)
 		return MT_NONE;
 	RTRACE("display-type: %s\n", pmt);
@@ -89,7 +89,7 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
 	}
 
 	for (i = 0; propnames[i] != NULL; ++i) {
-		pedid = (u8 *)get_property(dp, propnames[i], NULL);
+		pedid = get_property(dp, propnames[i], NULL);
 		if (pedid != NULL)
 			break;
 	}
@@ -124,14 +124,14 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
 		return MT_NONE;
 
 	if (rinfo->has_CRTC2) {
-		char *pname;
+		const char *pname;
 		int len, second = 0;
 
 		dp = dp->child;
 		do {
 			if (!dp)
 				return MT_NONE;
-			pname = (char *)get_property(dp, "name", NULL);
+			pname = get_property(dp, "name", NULL);
 			if (!pname)
 				return MT_NONE;
 			len = strlen(pname);
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index f31e606a2ded..e308ed2d249a 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -1268,7 +1268,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
 		0x21320032, 0xa1320032, 0x21320032, 0xffffffff,
 		0x31320032 };
 
-	u32 *mrtable = default_mrtable;
+	const u32 *mrtable = default_mrtable;
 	int i, mrtable_size = ARRAY_SIZE(default_mrtable);
 
 	mdelay(30);
@@ -1287,7 +1287,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
 	if (rinfo->of_node != NULL) {
 		int size;
 
-		mrtable = (u32 *)get_property(rinfo->of_node, "ATY,MRT", &size);
+		mrtable = get_property(rinfo->of_node, "ATY,MRT", &size);
 		if (mrtable)
 			mrtable_size = size >> 2;
 		else
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 390439b3d899..1b4f75d1f8a9 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3197,11 +3197,11 @@ static void fbcon_exit(void)
 		return;
 
 #ifdef CONFIG_ATARI
-	free_irq(IRQ_AUTO_4, fbcon_vbl_handler);
+	free_irq(IRQ_AUTO_4, fb_vbl_handler);
 #endif
 #ifdef CONFIG_MAC
 	if (MACH_IS_MAC && vbl_detected)
-		free_irq(IRQ_MAC_VBL, fbcon_vbl_handler);
+		free_irq(IRQ_MAC_VBL, fb_vbl_handler);
 #endif
 
 	kfree((void *)softback_buf);
diff --git a/drivers/video/nvidia/nv_of.c b/drivers/video/nvidia/nv_of.c
index 8209106e26ee..d9af88c2b580 100644
--- a/drivers/video/nvidia/nv_of.c
+++ b/drivers/video/nvidia/nv_of.c
@@ -32,7 +32,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
 {
 	struct nvidia_par *par = info->par;
 	struct device_node *parent, *dp;
-	unsigned char *pedid = NULL;
+	const unsigned char *pedid = NULL;
 	static char *propnames[] = {
 		"DFP,EDID", "LCD,EDID", "EDID", "EDID1",
 		"EDID,B", "EDID,A", NULL };
@@ -42,20 +42,19 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
 	if (parent == NULL)
 		return -1;
 	if (par->twoHeads) {
-		char *pname;
+		const char *pname;
 		int len;
 
 		for (dp = NULL;
 		     (dp = of_get_next_child(parent, dp)) != NULL;) {
-			pname = (char *)get_property(dp, "name", NULL);
+			pname = get_property(dp, "name", NULL);
 			if (!pname)
 				continue;
 			len = strlen(pname);
 			if ((pname[len-1] == 'A' && conn == 1) ||
 			    (pname[len-1] == 'B' && conn == 2)) {
 				for (i = 0; propnames[i] != NULL; ++i) {
-					pedid = (unsigned char *)
-					    get_property(dp, propnames[i],
-							 NULL);
+					pedid = get_property(dp, propnames[i],
+							     NULL);
 					if (pedid != NULL)
 						break;
@@ -67,8 +66,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
 	}
 	if (pedid == NULL) {
 		for (i = 0; propnames[i] != NULL; ++i) {
-			pedid = (unsigned char *)
-				get_property(parent, propnames[i], NULL);
+			pedid = get_property(parent, propnames[i], NULL);
 			if (pedid != NULL)
 				break;
 		}
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 0013311e0564..bad0e98fb3b6 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -409,30 +409,30 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
 	unsigned int flags, rsize, addr_prop = 0;
 	unsigned long max_size = 0;
 	u64 rstart, address = OF_BAD_ADDR;
-	u32 *pp, *addrp, *up;
+	const u32 *pp, *addrp, *up;
 	u64 asize;
 
-	pp = (u32 *)get_property(dp, "linux,bootx-depth", &len);
+	pp = get_property(dp, "linux,bootx-depth", &len);
 	if (pp == NULL)
-		pp = (u32 *)get_property(dp, "depth", &len);
+		pp = get_property(dp, "depth", &len);
 	if (pp && len == sizeof(u32))
 		depth = *pp;
 
-	pp = (u32 *)get_property(dp, "linux,bootx-width", &len);
+	pp = get_property(dp, "linux,bootx-width", &len);
 	if (pp == NULL)
-		pp = (u32 *)get_property(dp, "width", &len);
+		pp = get_property(dp, "width", &len);
 	if (pp && len == sizeof(u32))
 		width = *pp;
 
-	pp = (u32 *)get_property(dp, "linux,bootx-height", &len);
+	pp = get_property(dp, "linux,bootx-height", &len);
 	if (pp == NULL)
-		pp = (u32 *)get_property(dp, "height", &len);
+		pp = get_property(dp, "height", &len);
 	if (pp && len == sizeof(u32))
 		height = *pp;
 
-	pp = (u32 *)get_property(dp, "linux,bootx-linebytes", &len);
+	pp = get_property(dp, "linux,bootx-linebytes", &len);
 	if (pp == NULL)
-		pp = (u32 *)get_property(dp, "linebytes", &len);
+		pp = get_property(dp, "linebytes", &len);
 	if (pp && len == sizeof(u32))
 		pitch = *pp;
 	else
@@ -450,9 +450,9 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
 	 * ranges and pick one that is both big enough and if possible encloses
 	 * the "address" property. If none match, we pick the biggest
 	 */
-	up = (u32 *)get_property(dp, "linux,bootx-addr", &len);
+	up = get_property(dp, "linux,bootx-addr", &len);
 	if (up == NULL)
-		up = (u32 *)get_property(dp, "address", &len);
+		up = get_property(dp, "address", &len);
 	if (up && len == sizeof(u32))
 		addr_prop = *up;
 
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 8ddb47a56b07..4acde4f7dbf8 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1826,8 +1826,8 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
 {
 	struct riva_par *par = info->par;
 	struct device_node *dp;
-	unsigned char *pedid = NULL;
-	unsigned char *disptype = NULL;
+	const unsigned char *pedid = NULL;
+	const unsigned char *disptype = NULL;
 	static char *propnames[] = {
 		"DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL };
 	int i;
@@ -1835,14 +1835,13 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
 	NVTRACE_ENTER();
 	dp = pci_device_to_OF_node(pd);
 	for (; dp != NULL; dp = dp->child) {
-		disptype = (unsigned char *)get_property(dp, "display-type", NULL);
+		disptype = get_property(dp, "display-type", NULL);
 		if (disptype == NULL)
 			continue;
 		if (strncmp(disptype, "LCD", 3) != 0)
 			continue;
 		for (i = 0; propnames[i] != NULL; ++i) {
-			pedid = (unsigned char *)
-				get_property(dp, propnames[i], NULL);
+			pedid = get_property(dp, propnames[i], NULL);
 			if (pedid != NULL) {
 				par->EDID = pedid;
 				NVTRACE("LCD found.\n");